p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
}
+ case ssa.OpAMD64ANDQconstmodify, ssa.OpAMD64ANDLconstmodify, ssa.OpAMD64ORQconstmodify, ssa.OpAMD64ORLconstmodify,
+ ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify:
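+ // The ValAndOff auxint packs both the immediate (Val) and the
+ // memory offset (Off), so a single instruction such as
+ // ANDQ $7, 8(AX) can be emitted here.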
+ sc := v.AuxValAndOff()
+ off := sc.Off()
+ val := sc.Val()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = val
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.To, v, off)
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
((ADD|SUB|MUL)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL)SSload [off1+off2] {sym} val base mem)
((ADD|SUB|MUL)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL)SDload [off1+off2] {sym} val base mem)
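+// Fold ADDQconst address offsets into the combined value+offset
+// (ValAndOff) auxint carried by the const-modify ops.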
-(ADD(L|Q)constmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
- (ADD(L|Q)constmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
+ ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
+ ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
// Fold constants into stores.
(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
(MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
((ADD|SUB|MUL)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|MUL)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
-(ADD(L|Q)constmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
- (ADD(L|Q)constmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
+ ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
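+// Note: canMergeSym allows at most one non-nil symbol, so after mergeSym
+// the result unambiguously keeps the global's symbol; e.g. an OR of a
+// constant into a package-level var x becomes ORL $1, x(SB).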
// generating indexed loads and stores
(MOV(B|W|L|Q|SS|SD)load [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOV(B|W|L|Q|SS|SD)loadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
(MOVWQZX (MOVBQZX x)) -> (MOVBQZX x)
(MOVBQZX (MOVBQZX x)) -> (MOVBQZX x)
-(MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
- (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
-(MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ ((ADD|AND|OR|XOR)Qconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
- (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ ((ADD|AND|OR|XOR)Lconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
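+// Net effect: a read-modify-write such as *p &= 5 compiles to a single
+// ANDQ $5, (AX) instead of a separate load, AND, and store.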
// float <-> int register moves, with no conversion.
// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
{name: "MULQU2", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}, commutative: true, asm: "MULQ", clobberFlags: true}, // arg0 * arg1, returns (hi, lo)
{name: "DIVQU2", argLength: 3, reg: regInfo{inputs: []regMask{dx, ax, gpsp}, outputs: []regMask{ax, dx}}, asm: "DIVQ", clobberFlags: true}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)
- {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
- {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
- {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
- {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
-
- {name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
- {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
- {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
- {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
-
- {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
- {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
- {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
- {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ANDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ {name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORQconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "XORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
{name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1
OpAMD64ANDL
OpAMD64ANDQconst
OpAMD64ANDLconst
+ OpAMD64ANDQconstmodify
+ OpAMD64ANDLconstmodify
OpAMD64ORQ
OpAMD64ORL
OpAMD64ORQconst
OpAMD64ORLconst
+ OpAMD64ORQconstmodify
+ OpAMD64ORLconstmodify
OpAMD64XORQ
OpAMD64XORL
OpAMD64XORQconst
OpAMD64XORLconst
+ OpAMD64XORQconstmodify
+ OpAMD64XORLconstmodify
OpAMD64CMPQ
OpAMD64CMPL
OpAMD64CMPW
},
},
},
+ {
+ name: "ANDQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
{
name: "ORQ",
argLen: 2,
},
},
},
+ {
+ name: "ORQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
{
name: "XORQ",
argLen: 2,
},
},
},
+ {
+ name: "XORQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
{
name: "CMPQ",
argLen: 2,
return rewriteValueAMD64_OpAMD64ANDL_0(v)
case OpAMD64ANDLconst:
return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
+ case OpAMD64ANDLconstmodify:
+ return rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v)
case OpAMD64ANDLload:
return rewriteValueAMD64_OpAMD64ANDLload_0(v)
case OpAMD64ANDQ:
return rewriteValueAMD64_OpAMD64ANDQ_0(v)
case OpAMD64ANDQconst:
return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
+ case OpAMD64ANDQconstmodify:
+ return rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v)
case OpAMD64ANDQload:
return rewriteValueAMD64_OpAMD64ANDQload_0(v)
case OpAMD64BSFQ:
case OpAMD64MOVQloadidx8:
return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
case OpAMD64MOVQstore:
- return rewriteValueAMD64_OpAMD64MOVQstore_0(v)
+ return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v)
case OpAMD64MOVQstoreconst:
return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
case OpAMD64MOVQstoreconstidx1:
return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
case OpAMD64ORLconst:
return rewriteValueAMD64_OpAMD64ORLconst_0(v)
+ case OpAMD64ORLconstmodify:
+ return rewriteValueAMD64_OpAMD64ORLconstmodify_0(v)
case OpAMD64ORLload:
return rewriteValueAMD64_OpAMD64ORLload_0(v)
case OpAMD64ORQ:
return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
case OpAMD64ORQconst:
return rewriteValueAMD64_OpAMD64ORQconst_0(v)
+ case OpAMD64ORQconstmodify:
+ return rewriteValueAMD64_OpAMD64ORQconstmodify_0(v)
case OpAMD64ORQload:
return rewriteValueAMD64_OpAMD64ORQload_0(v)
case OpAMD64ROLB:
return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
case OpAMD64XORLconst:
return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
+ case OpAMD64XORLconstmodify:
+ return rewriteValueAMD64_OpAMD64XORLconstmodify_0(v)
case OpAMD64XORLload:
return rewriteValueAMD64_OpAMD64XORLload_0(v)
case OpAMD64XORQ:
return rewriteValueAMD64_OpAMD64XORQ_0(v) || rewriteValueAMD64_OpAMD64XORQ_10(v)
case OpAMD64XORQconst:
return rewriteValueAMD64_OpAMD64XORQconst_0(v)
+ case OpAMD64XORQconstmodify:
+ return rewriteValueAMD64_OpAMD64XORQconstmodify_0(v)
case OpAMD64XORQload:
return rewriteValueAMD64_OpAMD64XORQload_0(v)
case OpAdd16:
}
return false
}
+func rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v *Value) bool {
+ // match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64ANDLload_0(v *Value) bool {
b := v.Block
_ = b
}
return false
}
+func rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v *Value) bool {
+ // match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64ANDQload_0(v *Value) bool {
b := v.Block
_ = b
v.AddArg(mem)
return true
}
+ // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // result: (ANDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ break
+ }
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // result: (ORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64ORLconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ break
+ }
+ v.reset(OpAMD64ORLconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64XORLconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ break
+ }
+ v.reset(OpAMD64XORLconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
// cond:
// result: (MOVSSstore [off] {sym} ptr val mem)
v.AddArg(mem)
return true
}
+ // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // result: (ANDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ break
+ }
+ v.reset(OpAMD64ANDQconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool {
+ // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // result: (ORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64ORQconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ break
+ }
+ v.reset(OpAMD64ORQconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // result: (XORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64XORQconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ break
+ }
+ v.reset(OpAMD64XORQconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
// cond:
// result: (MOVSDstore [off] {sym} ptr val mem)
}
return false
}
+func rewriteValueAMD64_OpAMD64ORLconstmodify_0(v *Value) bool {
+ // match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64ORLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64ORLload_0(v *Value) bool {
b := v.Block
_ = b
}
return false
}
+func rewriteValueAMD64_OpAMD64ORQconstmodify_0(v *Value) bool {
+ // match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64ORQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64ORQload_0(v *Value) bool {
b := v.Block
_ = b
}
return false
}
+func rewriteValueAMD64_OpAMD64XORLconstmodify_0(v *Value) bool {
+ // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64XORLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64XORLload_0(v *Value) bool {
b := v.Block
_ = b
}
return false
}
+func rewriteValueAMD64_OpAMD64XORQconstmodify_0(v *Value) bool {
+ // match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64XORQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64XORQload_0(v *Value) bool {
b := v.Block
_ = b
return n
}
+// Check direct operations on memory with a constant source
+func bitOpOnMem(a []uint32) {
+ // amd64:`ANDL\s[$]200,\s\([A-Z]+\)`
+ a[0] &= 200
+ // amd64:`ORL\s[$]220,\s4\([A-Z]+\)`
+ a[1] |= 220
+ // amd64:`XORL\s[$]240,\s8\([A-Z]+\)`
+ a[2] ^= 240
+}
+
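+// A minimal sketch (not part of the original change) of how the 64-bit
+// forms could be checked, assuming the same folding applies to []uint64;
+// bitOpOnMem64 and its expected offsets are illustrative, not from the source:
+func bitOpOnMem64(a []uint64) {
+ // amd64:`ANDQ\s[$]200,\s\([A-Z]+\)`
+ a[0] &= 200
+ // amd64:`ORQ\s[$]220,\s8\([A-Z]+\)`
+ a[1] |= 220
+ // amd64:`XORQ\s[$]240,\s16\([A-Z]+\)`
+ a[2] ^= 240
+}
+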
// Check AND masking on arm64 (Issue #19857)
func and_mask_1(a uint64) uint64 {