From: Giovanni Bajo
Date: Sun, 18 Feb 2018 19:02:17 +0000 (+0100)
Subject: cmd/compile: fold LEAQ/ADDQconst into SETx ops
X-Git-Tag: go1.11beta1~1528
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=0cacc4d0e2a7bde8c50b5ef47ef65679226a1592;p=gostls13.git

cmd/compile: fold LEAQ/ADDQconst into SETx ops

This saves an instruction and a register. The new rules match ~4900
times during all.bash.

Change-Id: I2f867c5e70262004e31f545f3bb89e939c45b718
Reviewed-on: https://go-review.googlesource.com/94767
Reviewed-by: Keith Randall
---
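The pattern these rules target arises when a comparison result is stored
straight to memory: the SETx instruction takes a memory operand, and before
this change the address computation was left in a separate LEAQ or ADDQconst
feeding it. A minimal sketch of affected code (the type and function names
are hypothetical, and the before/after assembly in the comments is
illustrative; exact registers and offsets depend on the surrounding code):

	package p

	type T struct {
		pad  [8]byte
		flag bool // lives at offset 8 within T
	}

	// set stores the comparison result directly into memory; on amd64
	// this lowers to CMPQ plus a SETHI whose destination is t.flag.
	func set(t *T, x uint64) {
		t.flag = x > 4
	}

	// before: LEAQ 8(AX), CX  then  SETHI (CX)   -- extra instruction and register
	// after:  SETHI 8(AX)                        -- offset folded into the operand

diff --git a/src/cmd/compile/internal/gc/asm_test.go b/src/cmd/compile/internal/gc/asm_test.go
index 77fa4eb738..3ccf046555 100644
--- a/src/cmd/compile/internal/gc/asm_test.go
+++ b/src/cmd/compile/internal/gc/asm_test.go
@@ -1143,7 +1143,7 @@ var linuxAMD64Tests = []*asmTest{
 			return x > 4
 		}
 		`,
-		pos: []string{"\tSETHI\t\\("},
+		pos: []string{"\tSETHI\t.*\\(SP\\)"},
 	},
 	// Check that len() and cap() div by a constant power of two
 	// are compiled into SHRQ.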
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 19d0422862..5a0426d2f1 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -1074,6 +1074,8 @@
 	(MOV(Q|L|W|B|SS|SD|O)load [off1+off2] {sym} ptr mem)
 (MOV(Q|L|W|B|SS|SD|O)store [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(off1+off2) ->
 	(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {sym} ptr val mem)
+(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
+	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1+off2] {sym} base val mem)
 
 // Fold constants into stores.
 (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
@@ -1099,6 +1101,9 @@
 	(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
 (MOV(Q|L|W|B)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
 	(MOV(Q|L|W|B)storeconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)mem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
 
 // generating indexed loads and stores
 (MOV(B|W|L|Q|SS|SD)load [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
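The test update tracks the new output shape: the old pattern "\tSETHI\t\\("
only required SETHI to have some memory operand, while with the fold the
stack slot is expected to be addressed directly, hence "\tSETHI\t.*\\(SP\\)",
which would match output such as a hypothetical "SETHI ret+8(SP)".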
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 45fb955aeb..08f0ab7bd6 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -316,7 +316,7 @@ func rewriteValueAMD64(v *Value) bool {
 	case OpAMD64SETEQ:
 		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
 	case OpAMD64SETEQmem:
-		return rewriteValueAMD64_OpAMD64SETEQmem_0(v) || rewriteValueAMD64_OpAMD64SETEQmem_10(v)
+		return rewriteValueAMD64_OpAMD64SETEQmem_0(v) || rewriteValueAMD64_OpAMD64SETEQmem_10(v) || rewriteValueAMD64_OpAMD64SETEQmem_20(v)
 	case OpAMD64SETG:
 		return rewriteValueAMD64_OpAMD64SETG_0(v)
 	case OpAMD64SETGE:
@@ -336,7 +336,7 @@ func rewriteValueAMD64(v *Value) bool {
 	case OpAMD64SETNE:
 		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
 	case OpAMD64SETNEmem:
-		return rewriteValueAMD64_OpAMD64SETNEmem_0(v) || rewriteValueAMD64_OpAMD64SETNEmem_10(v)
+		return rewriteValueAMD64_OpAMD64SETNEmem_0(v) || rewriteValueAMD64_OpAMD64SETNEmem_10(v) || rewriteValueAMD64_OpAMD64SETNEmem_20(v)
 	case OpAMD64SHLL:
 		return rewriteValueAMD64_OpAMD64SHLL_0(v)
 	case OpAMD64SHLLconst:
@@ -35798,6 +35798,59 @@ func rewriteValueAMD64_OpAMD64SETAEmem_0(v *Value) bool {
 		v.AddArg(mem)
 		return true
 	}
+	// match: (SETAEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+	// cond: is32Bit(off1+off2)
+	// result: (SETAEmem [off1+off2] {sym} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64ADDQconst {
+			break
+		}
+		off2 := v_0.AuxInt
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
+		v.reset(OpAMD64SETAEmem)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (SETAEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// result: (SETAEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v_0.AuxInt
+		sym2 := v_0.Aux
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64SETAEmem)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
 	// match: (SETAEmem [off] {sym} ptr x:(FlagEQ) mem)
 	// cond:
 	// result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem)
@@ -35940,6 +35993,59 @@ func rewriteValueAMD64_OpAMD64SETAmem_0(v *Value) bool {
 		v.AddArg(mem)
 		return true
 	}
+	// match: (SETAmem [off1] {sym} (ADDQconst [off2] base) val mem)
+	// cond: is32Bit(off1+off2)
+	// result: (SETAmem [off1+off2] {sym} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64ADDQconst {
+			break
+		}
+		off2 := v_0.AuxInt
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
+		v.reset(OpAMD64SETAmem)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (SETAmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// result: (SETAmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v_0.AuxInt
+		sym2 := v_0.Aux
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64SETAmem)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
 	// match: (SETAmem [off] {sym} ptr x:(FlagEQ) mem)
 	// cond:
 	// result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem)
@@ -36234,6 +36340,59 @@ func rewriteValueAMD64_OpAMD64SETBEmem_0(v *Value) bool {
 		v.AddArg(mem)
 		return true
 	}
+	// match: (SETBEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+	// cond: is32Bit(off1+off2)
+	// result: (SETBEmem [off1+off2] {sym} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64ADDQconst {
+			break
+		}
+		off2 := v_0.AuxInt
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
+		v.reset(OpAMD64SETBEmem)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (SETBEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// result: (SETBEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v_0.AuxInt
+		sym2 := v_0.Aux
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64SETBEmem)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
 	// match: (SETBEmem [off] {sym} ptr x:(FlagEQ) mem)
 	// cond:
 	// result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem)
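Both rules are guarded: is32Bit keeps the combined displacement within the
signed 32-bit limit of an x86-64 addressing mode, and the LEAQ variant
additionally requires canMergeSym, since a memory operand can carry at most
one symbol reference. A minimal sketch of what these guards check, assuming
helpers along the lines of those in cmd/compile/internal/ssa/rewrite.go:

	package p

	// is32Bit reports whether n fits in a signed 32-bit immediate,
	// i.e. whether it survives a round trip through int32.
	func is32Bit(n int64) bool {
		return n == int64(int32(n))
	}

	// canMergeSym (sketch): merging is safe only when at most one of
	// the two aux symbols is set, so no symbol is dropped.
	func canMergeSym(x, y interface{}) bool {
		return x == nil || y == nil
	}

The generated matchers below apply these guards verbatim in their
rewrite conditions.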
@@ -36376,6 +36535,59 @@ func rewriteValueAMD64_OpAMD64SETBmem_0(v *Value) bool {
 		v.AddArg(mem)
 		return true
 	}
+	// match: (SETBmem [off1] {sym} (ADDQconst [off2] base) val mem)
+	// cond: is32Bit(off1+off2)
+	// result: (SETBmem [off1+off2] {sym} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64ADDQconst {
+			break
+		}
+		off2 := v_0.AuxInt
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
+		v.reset(OpAMD64SETBmem)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (SETBmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// result: (SETBmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v_0.AuxInt
+		sym2 := v_0.Aux
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64SETBmem)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
 	// match: (SETBmem [off] {sym} ptr x:(FlagEQ) mem)
 	// cond:
 	// result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem)
@@ -37119,6 +37331,59 @@ func rewriteValueAMD64_OpAMD64SETEQmem_0(v *Value) bool {
 		v.AddArg(mem)
 		return true
 	}
+	// match: (SETEQmem [off1] {sym} (ADDQconst [off2] base) val mem)
+	// cond: is32Bit(off1+off2)
+	// result: (SETEQmem [off1+off2] {sym} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64ADDQconst {
+			break
+		}
+		off2 := v_0.AuxInt
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
+		v.reset(OpAMD64SETEQmem)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (SETEQmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// result: (SETEQmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v_0.AuxInt
+		sym2 := v_0.Aux
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64SETEQmem)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
 	// match: (SETEQmem [off] {sym} ptr x:(FlagEQ) mem)
 	// cond:
 	// result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem)
@@ -37193,6 +37458,11 @@ func rewriteValueAMD64_OpAMD64SETEQmem_10(v *Value) bool {
 		v.AddArg(mem)
 		return true
 	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64SETEQmem_20(v *Value) bool {
+	b := v.Block
+	_ = b
 	// match: (SETEQmem [off] {sym} ptr x:(FlagGT_ULT) mem)
 	// cond:
 	// result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem)
@@ -37418,6 +37688,59 @@ func rewriteValueAMD64_OpAMD64SETGEmem_0(v *Value) bool {
 		v.AddArg(mem)
 		return true
 	}
+	// match: (SETGEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+	// cond: is32Bit(off1+off2)
+	// result: (SETGEmem [off1+off2] {sym} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64ADDQconst {
+			break
+		}
+		off2 := v_0.AuxInt
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
+		v.reset(OpAMD64SETGEmem)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (SETGEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// result: (SETGEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v_0.AuxInt
+		sym2 := v_0.Aux
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64SETGEmem)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
 	// match: (SETGEmem [off] {sym} ptr x:(FlagEQ) mem)
 	// cond:
 	// result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem)
@@ -37560,6 +37883,59 @@ func rewriteValueAMD64_OpAMD64SETGmem_0(v *Value) bool {
 		v.AddArg(mem)
 		return true
 	}
+	// match: (SETGmem [off1] {sym} (ADDQconst [off2] base) val mem)
+	// cond: is32Bit(off1+off2)
+	// result: (SETGmem [off1+off2] {sym} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64ADDQconst {
+			break
+		}
+		off2 := v_0.AuxInt
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
+		v.reset(OpAMD64SETGmem)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (SETGmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// result: (SETGmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v_0.AuxInt
+		sym2 := v_0.Aux
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64SETGmem)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
 	// match: (SETGmem [off] {sym} ptr x:(FlagEQ) mem)
 	// cond:
 	// result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem)
@@ -37854,6 +38230,59 @@ func rewriteValueAMD64_OpAMD64SETLEmem_0(v *Value) bool {
 		v.AddArg(mem)
 		return true
 	}
+	// match: (SETLEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+	// cond: is32Bit(off1+off2)
+	// result: (SETLEmem [off1+off2] {sym} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64ADDQconst {
+			break
+		}
+		off2 := v_0.AuxInt
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
+		v.reset(OpAMD64SETLEmem)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (SETLEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// result: (SETLEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v_0.AuxInt
+		sym2 := v_0.Aux
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64SETLEmem)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
 	// match: (SETLEmem [off] {sym} ptr x:(FlagEQ) mem)
 	// cond:
 	// result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem)
@@ -37996,6 +38425,59 @@ func rewriteValueAMD64_OpAMD64SETLmem_0(v *Value) bool {
 		v.AddArg(mem)
 		return true
 	}
+	// match: (SETLmem [off1] {sym} (ADDQconst [off2] base) val mem)
+	// cond: is32Bit(off1+off2)
+	// result: (SETLmem [off1+off2] {sym} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64ADDQconst {
+			break
+		}
+		off2 := v_0.AuxInt
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
+		v.reset(OpAMD64SETLmem)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (SETLmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// result: (SETLmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v_0.AuxInt
+		sym2 := v_0.Aux
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64SETLmem)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
 	// match: (SETLmem [off] {sym} ptr x:(FlagEQ) mem)
 	// cond:
 	// result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem)
@@ -38739,6 +39221,59 @@ func rewriteValueAMD64_OpAMD64SETNEmem_0(v *Value) bool {
 		v.AddArg(mem)
 		return true
 	}
+	// match: (SETNEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+	// cond: is32Bit(off1+off2)
+	// result: (SETNEmem [off1+off2] {sym} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64ADDQconst {
+			break
+		}
+		off2 := v_0.AuxInt
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
+		v.reset(OpAMD64SETNEmem)
+		v.AuxInt = off1 + off2
+		v.Aux = sym
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (SETNEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+	// result: (SETNEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		_ = v.Args[2]
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v_0.AuxInt
+		sym2 := v_0.Aux
+		base := v_0.Args[0]
+		val := v.Args[1]
+		mem := v.Args[2]
+		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64SETNEmem)
+		v.AuxInt = off1 + off2
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(base)
+		v.AddArg(val)
+		v.AddArg(mem)
+		return true
+	}
 	// match: (SETNEmem [off] {sym} ptr x:(FlagEQ) mem)
 	// cond:
 	// result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem)
@@ -38813,6 +39348,11 @@ func rewriteValueAMD64_OpAMD64SETNEmem_10(v *Value) bool {
 		v.AddArg(mem)
 		return true
 	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64SETNEmem_20(v *Value) bool {
+	b := v.Block
+	_ = b
 	// match: (SETNEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
 	// cond:
 	// result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem)