noDuffDevice was for Plan 9, but Plan 9 doesn't need it anymore.
It was also being set for s390x, mips, mipsle, and wasm, but it had
no effect there, since the SSA rules for those architectures never
refer to it.
Change-Id: Ib85c0832674c714f3ad5091f0a022eb7cd3ebcdf
Reviewed-on: https://go-review.googlesource.com/c/go/+/655878
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
Auto-Submit: Russ Cox <rsc@golang.org>
// Medium copying uses a duff device.
(Move [s] dst src mem)
&& s > 8 && s <= 4*128 && s%4 == 0
- && !config.noDuffDevice && logLargeCopy(v, s) =>
+ && logLargeCopy(v, s) =>
(DUFFCOPY [10*(128-s/4)] dst src mem)
// 10 and 128 are magic constants. 10 is the number of bytes to encode:
// MOVL (SI), CX
// and 128 is the number of such blocks. See src/runtime/duff_386.s:duffcopy.
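(Worked example of the offset arithmetic, using the constants above: copying s = 400 bytes is s/4 = 100 words, so the Move is rewritten to DUFFCOPY [10*(128-100)] = DUFFCOPY [280]; the auxint is the entry offset into duffcopy, so the first 28 ten-byte blocks are skipped and exactly the last 100 run, copying 400 bytes.)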
// Large copying uses REP MOVSL.
-(Move [s] dst src mem) && (s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s) =>
+(Move [s] dst src mem) && s > 4*128 && s%4 == 0 && logLargeCopy(v, s) =>
(REPMOVSL dst src (MOVLconst [int32(s/4)]) mem)
// Lowering Zero instructions
// Medium zeroing uses a duff device.
(Zero [s] destptr mem)
- && s > 16 && s <= 4*128 && s%4 == 0
- && !config.noDuffDevice =>
+ && s > 16 && s <= 4*128 && s%4 == 0 =>
(DUFFZERO [1*(128-s/4)] destptr (MOVLconst [0]) mem)
// 1 and 128 are magic constants. 1 is the number of bytes to encode STOSL.
// 128 is the number of STOSL instructions in duffzero.
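(Worked example: zeroing s = 64 bytes is s/4 = 16 words, so the Zero is rewritten to DUFFZERO [1*(128-16)] = DUFFZERO [112], entering duffzero with only the last 16 STOSL instructions left to execute.)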
// Large zeroing uses REP STOSL.
(Zero [s] destptr mem)
- && (s > 4*128 || (config.noDuffDevice && s > 16))
+ && s > 4*128
&& s%4 == 0 =>
(REPSTOSL destptr (MOVLconst [int32(s/4)]) (MOVLconst [0]) mem)
// Medium copying uses a duff device.
(Move [s] dst src mem)
&& s > 64 && s <= 16*64 && s%16 == 0
- && !config.noDuffDevice && logLargeCopy(v, s) =>
+ && logLargeCopy(v, s) =>
(DUFFCOPY [s] dst src mem)
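(Note that on AMD64 the auxint is the copy size itself rather than an entry offset; the translation to a duffcopy entry point appears to happen later in the amd64 code generator rather than in the rule, which is why no offset formula shows up here — a reading of the backend, not something stated in this CL.)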
// Large copying uses REP MOVSQ.
-(Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s) =>
+(Move [s] dst src mem) && s > 16*64 && s%8 == 0 && logLargeCopy(v, s) =>
(REPMOVSQ dst src (MOVQconst [s/8]) mem)
// Lowering Zero instructions
// Medium zeroing uses a duff device.
(Zero [s] destptr mem)
- && s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice =>
+ && s > 64 && s <= 1024 && s%16 == 0 =>
(DUFFZERO [s] destptr mem)
// Large zeroing uses REP STOSQ.
(Zero [s] destptr mem)
- && (s > 1024 || (config.noDuffDevice && s > 64))
- && s%8 == 0 =>
+ && s > 1024 && s%8 == 0 =>
(REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
// Lowering constants
// 4 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
&& s%4 == 0 && s > 4 && s <= 512
- && t.Alignment()%4 == 0 && !config.noDuffDevice =>
+ && t.Alignment()%4 == 0 =>
(DUFFZERO [4 * (128 - s/4)] ptr (MOVWconst [0]) mem)
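(Worked example, assuming the ARM duffzero layout from runtime/mkduff.go of 128 single-word stores at 4 bytes of code each: zeroing s = 128 bytes is s/4 = 32 words, giving DUFFZERO [4*(128-32)] = DUFFZERO [384], which enters duffzero with exactly 32 stores remaining.)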
// Large zeroing uses a loop
(Zero [s] {t} ptr mem)
- && (s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0 =>
+ && s > 512 || t.Alignment()%4 != 0 =>
(LoweredZero [t.Alignment()]
ptr
(ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))])
// 8 and 128 are magic constants, see runtime/mkduff.go
(Move [s] {t} dst src mem)
&& s%4 == 0 && s > 4 && s <= 512
- && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s) =>
+ && t.Alignment()%4 == 0 && logLargeCopy(v, s) =>
(DUFFCOPY [8 * (128 - s/4)] dst src mem)
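(Worked example: with 8 bytes of code per word copied and 128 blocks, copying s = 256 bytes is s/4 = 64 words, giving DUFFCOPY [8*(128-64)] = DUFFCOPY [512], i.e. only the last 64 blocks run.)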
// Large move uses a loop
(Move [s] {t} dst src mem)
- && ((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s) =>
+ && (s > 512 || t.Alignment()%4 != 0) && logLargeCopy(v, s) =>
(LoweredMove [t.Alignment()]
dst
src
// medium zeroing uses a duff device
// 4, 16, and 64 are magic constants, see runtime/mkduff.go
(Zero [s] ptr mem)
- && s%16 == 0 && s > 64 && s <= 16*64
- && !config.noDuffDevice =>
+ && s%16 == 0 && s > 64 && s <= 16*64 =>
(DUFFZERO [4 * (64 - s/16)] ptr mem)
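(Worked example using the 4/16/64 constants noted above: ARM64 duffzero is 64 blocks, each 4 bytes of code zeroing 16 bytes, so zeroing s = 256 bytes is s/16 = 16 blocks and the rule emits DUFFZERO [4*(64-16)] = DUFFZERO [192].)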
// large zeroing uses a loop
(Zero [s] ptr mem)
- && s%16 == 0 && (s > 16*64 || config.noDuffDevice) =>
+ && s%16 == 0 && s > 16*64 =>
(LoweredZero
ptr
(ADDconst <ptr.Type> [s-16] ptr)
// medium move uses a duff device
(Move [s] dst src mem)
&& s > 64 && s <= 16*64 && s%16 == 0
- && !config.noDuffDevice && logLargeCopy(v, s) =>
+ && logLargeCopy(v, s) =>
(DUFFCOPY [8 * (64 - s/16)] dst src mem)
// 8 is the number of bytes to encode:
//
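(Worked example: each ARM64 duffcopy block is 8 bytes of code moving 16 bytes of data, with 64 blocks in total, so copying s = 512 bytes (s/16 = 32 blocks) gives DUFFCOPY [8*(64-32)] = DUFFCOPY [256].)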
// large move uses a loop
(Move [s] dst src mem)
- && s%16 == 0 && (s > 16*64 || config.noDuffDevice)
+ && s%16 == 0 && s > 16*64
&& logLargeCopy(v, s) =>
(LoweredMove
dst
// medium zeroing uses a duff device
(Zero [s] ptr mem)
- && s%8 == 0 && s > 16 && s <= 8*128
- && !config.noDuffDevice =>
+ && s%8 == 0 && s > 16 && s <= 8*128 =>
(DUFFZERO [8 * (128 - s/8)] ptr mem)
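(Worked example, assuming the loong64 duffzero from runtime/mkduff.go is 128 blocks of two 4-byte instructions, each block zeroing 8 bytes: s = 512 bytes is s/8 = 64 blocks, so the rule emits DUFFZERO [8*(128-64)] = DUFFZERO [512].)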
// large zeroing uses a loop
// medium move uses a duff device
(Move [s] dst src mem)
&& s%8 == 0 && s > 16 && s <= 8*128
- && !config.noDuffDevice && logLargeCopy(v, s) =>
+ && logLargeCopy(v, s) =>
(DUFFCOPY [16 * (128 - s/8)] dst src mem)
// 16 and 128 are magic constants. 16 is the number of bytes to encode:
// MOVV (R20), R30
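(Worked example: at 16 bytes of code per 8-byte block and 128 blocks, copying s = 800 bytes is s/8 = 100 blocks, so the rule emits DUFFCOPY [16*(128-100)] = DUFFCOPY [448].)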
// 8 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
&& s%8 == 0 && s > 24 && s <= 8*128
- && t.Alignment()%8 == 0 && !config.noDuffDevice =>
+ && t.Alignment()%8 == 0 =>
(DUFFZERO [8 * (128 - s/8)] ptr mem)
// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
- && (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 =>
+ && s > 8*128 || t.Alignment()%8 != 0 =>
(LoweredZero [t.Alignment()]
ptr
(ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)])
// medium move uses a duff device
(Move [s] {t} dst src mem)
&& s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0
- && !config.noDuffDevice && logLargeCopy(v, s) =>
+ && logLargeCopy(v, s) =>
(DUFFCOPY [16 * (128 - s/8)] dst src mem)
// 16 and 128 are magic constants. 16 is the number of bytes to encode:
// MOVV (R1), R23
// 8 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
&& s%8 == 0 && s <= 8*128
- && t.Alignment()%8 == 0 && !config.noDuffDevice =>
+ && t.Alignment()%8 == 0 =>
(DUFFZERO [8 * (128 - s/8)] ptr mem)
// Generic zeroing uses a loop
// 16 and 128 are magic constants, see runtime/mkduff.go
(Move [s] {t} dst src mem)
&& s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
- && !config.noDuffDevice && logLargeCopy(v, s) =>
+ && logLargeCopy(v, s) =>
(DUFFCOPY [16 * (128 - s/8)] dst src mem)
// Generic move uses a loop
hasGReg bool // has hardware g register
ctxt *obj.Link // Generic arch information
optimize bool // Do optimization
- noDuffDevice bool // Don't use Duff's device
useAvg bool // Use optimizations that need Avg* operations
useHmul bool // Use optimizations that need Hmul* operations
SoftFloat bool //
c.FPReg = framepointerRegS390X
c.LinkReg = linkRegS390X
c.hasGReg = true
- c.noDuffDevice = true
c.BigEndian = true
c.unalignedOK = true
c.haveBswap64 = true
c.FPReg = framepointerRegMIPS
c.LinkReg = linkRegMIPS
c.hasGReg = true
- c.noDuffDevice = true
case "riscv64":
c.PtrSize = 8
c.RegSize = 8
c.FPReg = framepointerRegWasm
c.LinkReg = linkRegWasm
c.hasGReg = true
- c.noDuffDevice = true
c.useAvg = false
c.useHmul = false
default:
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- config := b.Func.Config
typ := &b.Func.Config.Types
// match: (Move [0] _ _ mem)
// result: mem
return true
}
// match: (Move [s] dst src mem)
- // cond: s > 8 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // cond: s > 8 && s <= 4*128 && s%4 == 0 && logLargeCopy(v, s)
// result: (DUFFCOPY [10*(128-s/4)] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s > 8 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ if !(s > 8 && s <= 4*128 && s%4 == 0 && logLargeCopy(v, s)) {
break
}
v.reset(Op386DUFFCOPY)
return true
}
// match: (Move [s] dst src mem)
- // cond: (s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s)
+ // cond: s > 4*128 && s%4 == 0 && logLargeCopy(v, s)
// result: (REPMOVSL dst src (MOVLconst [int32(s/4)]) mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !((s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s)) {
+ if !(s > 4*128 && s%4 == 0 && logLargeCopy(v, s)) {
break
}
v.reset(Op386REPMOVSL)
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- config := b.Func.Config
typ := &b.Func.Config.Types
// match: (Zero [0] _ mem)
// result: mem
return true
}
// match: (Zero [s] destptr mem)
- // cond: s > 16 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice
+ // cond: s > 16 && s <= 4*128 && s%4 == 0
// result: (DUFFZERO [1*(128-s/4)] destptr (MOVLconst [0]) mem)
for {
s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
- if !(s > 16 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice) {
+ if !(s > 16 && s <= 4*128 && s%4 == 0) {
break
}
v.reset(Op386DUFFZERO)
return true
}
// match: (Zero [s] destptr mem)
- // cond: (s > 4*128 || (config.noDuffDevice && s > 16)) && s%4 == 0
+ // cond: s > 4*128 && s%4 == 0
// result: (REPSTOSL destptr (MOVLconst [int32(s/4)]) (MOVLconst [0]) mem)
for {
s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
- if !((s > 4*128 || (config.noDuffDevice && s > 16)) && s%4 == 0) {
+ if !(s > 4*128 && s%4 == 0) {
break
}
v.reset(Op386REPSTOSL)
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- config := b.Func.Config
typ := &b.Func.Config.Types
// match: (Move [0] _ _ mem)
// result: mem
return true
}
// match: (Move [s] dst src mem)
- // cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)
// result: (DUFFCOPY [s] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) {
break
}
v.reset(OpAMD64DUFFCOPY)
return true
}
// match: (Move [s] dst src mem)
- // cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)
+ // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s)
// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) {
+ if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) {
break
}
v.reset(OpAMD64REPMOVSQ)
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- config := b.Func.Config
typ := &b.Func.Config.Types
// match: (Zero [0] _ mem)
// result: mem
return true
}
// match: (Zero [s] destptr mem)
- // cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
+ // cond: s > 64 && s <= 1024 && s%16 == 0
// result: (DUFFZERO [s] destptr mem)
for {
s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
- if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
+ if !(s > 64 && s <= 1024 && s%16 == 0) {
break
}
v.reset(OpAMD64DUFFZERO)
return true
}
// match: (Zero [s] destptr mem)
- // cond: (s > 1024 || (config.noDuffDevice && s > 64)) && s%8 == 0
+ // cond: s > 1024 && s%8 == 0
// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
for {
s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
- if !((s > 1024 || (config.noDuffDevice && s > 64)) && s%8 == 0) {
+ if !(s > 1024 && s%8 == 0) {
break
}
v.reset(OpAMD64REPSTOSQ)
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && logLargeCopy(v, s)
// result: (DUFFCOPY [8 * (128 - s/4)] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && logLargeCopy(v, s)) {
break
}
v.reset(OpARMDUFFCOPY)
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: ((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s)
+ // cond: (s > 512 || t.Alignment()%4 != 0) && logLargeCopy(v, s)
// result: (LoweredMove [t.Alignment()] dst src (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))]) mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s)) {
+ if !((s > 512 || t.Alignment()%4 != 0) && logLargeCopy(v, s)) {
break
}
v.reset(OpARMLoweredMove)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice
+ // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0
// result: (DUFFZERO [4 * (128 - s/4)] ptr (MOVWconst [0]) mem)
for {
s := auxIntToInt64(v.AuxInt)
t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice) {
+ if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0) {
break
}
v.reset(OpARMDUFFZERO)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: (s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0
+ // cond: s > 512 || t.Alignment()%4 != 0
// result: (LoweredZero [t.Alignment()] ptr (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))]) (MOVWconst [0]) mem)
for {
s := auxIntToInt64(v.AuxInt)
t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) {
+ if !(s > 512 || t.Alignment()%4 != 0) {
break
}
v.reset(OpARMLoweredZero)
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- config := b.Func.Config
typ := &b.Func.Config.Types
// match: (Move [0] _ _ mem)
// result: mem
return true
}
// match: (Move [s] dst src mem)
- // cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)
// result: (DUFFCOPY [8 * (64 - s/16)] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) {
break
}
v.reset(OpARM64DUFFCOPY)
return true
}
// match: (Move [s] dst src mem)
- // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice) && logLargeCopy(v, s)
+ // cond: s%16 == 0 && s > 16*64 && logLargeCopy(v, s)
// result: (LoweredMove dst src (ADDconst <src.Type> src [s-16]) mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice) && logLargeCopy(v, s)) {
+ if !(s%16 == 0 && s > 16*64 && logLargeCopy(v, s)) {
break
}
v.reset(OpARM64LoweredMove)
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- config := b.Func.Config
typ := &b.Func.Config.Types
// match: (Zero [0] _ mem)
// result: mem
return true
}
// match: (Zero [s] ptr mem)
- // cond: s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice
+ // cond: s%16 == 0 && s > 64 && s <= 16*64
// result: (DUFFZERO [4 * (64 - s/16)] ptr mem)
for {
s := auxIntToInt64(v.AuxInt)
ptr := v_0
mem := v_1
- if !(s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice) {
+ if !(s%16 == 0 && s > 64 && s <= 16*64) {
break
}
v.reset(OpARM64DUFFZERO)
return true
}
// match: (Zero [s] ptr mem)
- // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice)
+ // cond: s%16 == 0 && s > 16*64
// result: (LoweredZero ptr (ADDconst <ptr.Type> [s-16] ptr) mem)
for {
s := auxIntToInt64(v.AuxInt)
ptr := v_0
mem := v_1
- if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice)) {
+ if !(s%16 == 0 && s > 16*64) {
break
}
v.reset(OpARM64LoweredZero)
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- config := b.Func.Config
typ := &b.Func.Config.Types
// match: (Move [0] _ _ mem)
// result: mem
return true
}
// match: (Move [s] dst src mem)
- // cond: s%8 == 0 && s > 16 && s <= 8*128 && !config.noDuffDevice && logLargeCopy(v, s)
+ // cond: s%8 == 0 && s > 16 && s <= 8*128 && logLargeCopy(v, s)
// result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s%8 == 0 && s > 16 && s <= 8*128 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ if !(s%8 == 0 && s > 16 && s <= 8*128 && logLargeCopy(v, s)) {
break
}
v.reset(OpLOONG64DUFFCOPY)
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- config := b.Func.Config
typ := &b.Func.Config.Types
// match: (Zero [0] _ mem)
// result: mem
return true
}
// match: (Zero [s] ptr mem)
- // cond: s%8 == 0 && s > 16 && s <= 8*128 && !config.noDuffDevice
+ // cond: s%8 == 0 && s > 16 && s <= 8*128
// result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
for {
s := auxIntToInt64(v.AuxInt)
ptr := v_0
mem := v_1
- if !(s%8 == 0 && s > 16 && s <= 8*128 && !config.noDuffDevice) {
+ if !(s%8 == 0 && s > 16 && s <= 8*128) {
break
}
v.reset(OpLOONG64DUFFZERO)
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && logLargeCopy(v, s)
// result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && logLargeCopy(v, s)) {
break
}
v.reset(OpMIPS64DUFFCOPY)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice
+ // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0
// result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
for {
s := auxIntToInt64(v.AuxInt)
t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) {
+ if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0) {
break
}
v.reset(OpMIPS64DUFFZERO)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0
+ // cond: s > 8*128 || t.Alignment()%8 != 0
// result: (LoweredZero [t.Alignment()] ptr (ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)]) mem)
for {
s := auxIntToInt64(v.AuxInt)
t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !((s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0) {
+ if !(s > 8*128 || t.Alignment()%8 != 0) {
break
}
v.reset(OpMIPS64LoweredZero)
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && logLargeCopy(v, s)
// result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && logLargeCopy(v, s)) {
break
}
v.reset(OpRISCV64DUFFCOPY)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice
+ // cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
// result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
for {
s := auxIntToInt64(v.AuxInt)
t := auxToType(v.Aux)
ptr := v_0
mem := v_1
- if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) {
+ if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0) {
break
}
v.reset(OpRISCV64DUFFZERO)