return rewriteValueAMD64_OpAMD64ANDQmem_0(v)
case OpAMD64BSFQ:
return rewriteValueAMD64_OpAMD64BSFQ_0(v)
- case OpAMD64CMOVLCC:
- return rewriteValueAMD64_OpAMD64CMOVLCC_0(v)
- case OpAMD64CMOVLCS:
- return rewriteValueAMD64_OpAMD64CMOVLCS_0(v)
- case OpAMD64CMOVLEQ:
- return rewriteValueAMD64_OpAMD64CMOVLEQ_0(v)
- case OpAMD64CMOVLGE:
- return rewriteValueAMD64_OpAMD64CMOVLGE_0(v)
- case OpAMD64CMOVLGT:
- return rewriteValueAMD64_OpAMD64CMOVLGT_0(v)
- case OpAMD64CMOVLHI:
- return rewriteValueAMD64_OpAMD64CMOVLHI_0(v)
- case OpAMD64CMOVLLE:
- return rewriteValueAMD64_OpAMD64CMOVLLE_0(v)
- case OpAMD64CMOVLLS:
- return rewriteValueAMD64_OpAMD64CMOVLLS_0(v)
- case OpAMD64CMOVLLT:
- return rewriteValueAMD64_OpAMD64CMOVLLT_0(v)
- case OpAMD64CMOVLNE:
- return rewriteValueAMD64_OpAMD64CMOVLNE_0(v)
- case OpAMD64CMOVQCC:
- return rewriteValueAMD64_OpAMD64CMOVQCC_0(v)
- case OpAMD64CMOVQCS:
- return rewriteValueAMD64_OpAMD64CMOVQCS_0(v)
case OpAMD64CMOVQEQ:
return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
- case OpAMD64CMOVQGE:
- return rewriteValueAMD64_OpAMD64CMOVQGE_0(v)
- case OpAMD64CMOVQGT:
- return rewriteValueAMD64_OpAMD64CMOVQGT_0(v)
- case OpAMD64CMOVQHI:
- return rewriteValueAMD64_OpAMD64CMOVQHI_0(v)
- case OpAMD64CMOVQLE:
- return rewriteValueAMD64_OpAMD64CMOVQLE_0(v)
- case OpAMD64CMOVQLS:
- return rewriteValueAMD64_OpAMD64CMOVQLS_0(v)
- case OpAMD64CMOVQLT:
- return rewriteValueAMD64_OpAMD64CMOVQLT_0(v)
- case OpAMD64CMOVQNE:
- return rewriteValueAMD64_OpAMD64CMOVQNE_0(v)
- case OpAMD64CMOVWCC:
- return rewriteValueAMD64_OpAMD64CMOVWCC_0(v)
- case OpAMD64CMOVWCS:
- return rewriteValueAMD64_OpAMD64CMOVWCS_0(v)
- case OpAMD64CMOVWEQ:
- return rewriteValueAMD64_OpAMD64CMOVWEQ_0(v)
- case OpAMD64CMOVWGE:
- return rewriteValueAMD64_OpAMD64CMOVWGE_0(v)
- case OpAMD64CMOVWGT:
- return rewriteValueAMD64_OpAMD64CMOVWGT_0(v)
- case OpAMD64CMOVWHI:
- return rewriteValueAMD64_OpAMD64CMOVWHI_0(v)
- case OpAMD64CMOVWLE:
- return rewriteValueAMD64_OpAMD64CMOVWLE_0(v)
- case OpAMD64CMOVWLS:
- return rewriteValueAMD64_OpAMD64CMOVWLS_0(v)
- case OpAMD64CMOVWLT:
- return rewriteValueAMD64_OpAMD64CMOVWLT_0(v)
- case OpAMD64CMOVWNE:
- return rewriteValueAMD64_OpAMD64CMOVWNE_0(v)
case OpAMD64CMPB:
return rewriteValueAMD64_OpAMD64CMPB_0(v)
case OpAMD64CMPBconst:
return rewriteValueAMD64_OpAMD64SUBSSmem_0(v)
case OpAMD64TESTB:
return rewriteValueAMD64_OpAMD64TESTB_0(v)
- case OpAMD64TESTBconst:
- return rewriteValueAMD64_OpAMD64TESTBconst_0(v)
case OpAMD64TESTL:
return rewriteValueAMD64_OpAMD64TESTL_0(v)
- case OpAMD64TESTLconst:
- return rewriteValueAMD64_OpAMD64TESTLconst_0(v)
case OpAMD64TESTQ:
return rewriteValueAMD64_OpAMD64TESTQ_0(v)
- case OpAMD64TESTQconst:
- return rewriteValueAMD64_OpAMD64TESTQconst_0(v)
case OpAMD64TESTW:
return rewriteValueAMD64_OpAMD64TESTW_0(v)
- case OpAMD64TESTWconst:
- return rewriteValueAMD64_OpAMD64TESTWconst_0(v)
case OpAMD64XADDLlock:
return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
case OpAMD64XADDQlock:
return rewriteValueAMD64_OpCom64_0(v)
case OpCom8:
return rewriteValueAMD64_OpCom8_0(v)
- case OpCondSelect:
- return rewriteValueAMD64_OpCondSelect_0(v) || rewriteValueAMD64_OpCondSelect_10(v) || rewriteValueAMD64_OpCondSelect_20(v) || rewriteValueAMD64_OpCondSelect_30(v) || rewriteValueAMD64_OpCondSelect_40(v)
case OpConst16:
return rewriteValueAMD64_OpConst16_0(v)
case OpConst32:
}
return false
}
-func rewriteValueAMD64_OpAMD64CMOVLCC_0(v *Value) bool {
- // match: (CMOVLCC x y (InvertFlags cond))
- // cond:
- // result: (CMOVLLS x y cond)
+func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
+ // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
+ // cond: c != 0
+ // result: x
for {
_ = v.Args[2]
x := v.Args[0]
- y := v.Args[1]
v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpAMD64BSFQ {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpAMD64ORQconst {
+ break
+ }
+ c := v_2_0_0.AuxInt
+ if !(c != 0) {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVLLS)
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64CMOVLCS_0(v *Value) bool {
- // match: (CMOVLCS x y (InvertFlags cond))
+func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPB x (MOVLconst [c]))
// cond:
- // result: (CMOVLHI x y cond)
+ // result: (CMPBconst x [int64(int8(c))])
for {
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVLHI)
+ c := v_1.AuxInt
+ v.reset(OpAMD64CMPBconst)
+ v.AuxInt = int64(int8(c))
v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVLEQ_0(v *Value) bool {
- // match: (CMOVLEQ x y (InvertFlags cond))
+ // match: (CMPB (MOVLconst [c]) x)
// cond:
- // result: (CMOVLEQ x y cond)
+ // result: (InvertFlags (CMPBconst x [int64(int8(c))]))
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVLEQ)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v0.AuxInt = int64(int8(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVLGE_0(v *Value) bool {
- // match: (CMOVLGE x y (InvertFlags cond))
- // cond:
- // result: (CMOVLLE x y cond)
+ // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (CMPBmem {sym} [off] ptr x mem)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ _ = v.Args[1]
+ l := v.Args[0]
+ if l.Op != OpAMD64MOVBload {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ x := v.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVLLE)
+ v.reset(OpAMD64CMPBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVLGT_0(v *Value) bool {
- // match: (CMOVLGT x y (InvertFlags cond))
- // cond:
- // result: (CMOVLLT x y cond)
+ // match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (InvertFlags (CMPBmem {sym} [off] ptr x mem))
for {
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ l := v.Args[1]
+ if l.Op != OpAMD64MOVBload {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVLLT)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBmem, types.TypeFlags)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(x)
+ v0.AddArg(mem)
+ v.AddArg(v0)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64CMOVLHI_0(v *Value) bool {
- // match: (CMOVLHI x y (InvertFlags cond))
- // cond:
- // result: (CMOVLCS x y cond)
+func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)==int8(y)
+ // result: (FlagEQ)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVLCS)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ x := v_0.AuxInt
+ if !(int8(x) == int8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVLLE_0(v *Value) bool {
- // match: (CMOVLLE x y (InvertFlags cond))
- // cond:
- // result: (CMOVLGE x y cond)
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<int8(y) && uint8(x)<uint8(y)
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVLGE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ x := v_0.AuxInt
+ if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVLLS_0(v *Value) bool {
- // match: (CMOVLLS x y (InvertFlags cond))
- // cond:
- // result: (CMOVLCC x y cond)
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<int8(y) && uint8(x)>uint8(y)
+ // result: (FlagLT_UGT)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVLCC)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ x := v_0.AuxInt
+ if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVLLT_0(v *Value) bool {
- // match: (CMOVLLT x y (InvertFlags cond))
- // cond:
- // result: (CMOVLGT x y cond)
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>int8(y) && uint8(x)<uint8(y)
+ // result: (FlagGT_ULT)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVLGT)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ x := v_0.AuxInt
+ if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVLNE_0(v *Value) bool {
- // match: (CMOVLNE x y (InvertFlags cond))
- // cond:
- // result: (CMOVLNE x y cond)
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>int8(y) && uint8(x)>uint8(y)
+ // result: (FlagGT_UGT)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVLNE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ x := v_0.AuxInt
+ if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVQCC_0(v *Value) bool {
- // match: (CMOVQCC x y (InvertFlags cond))
- // cond:
- // result: (CMOVQLS x y cond)
+ // match: (CMPBconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int8(m) && int8(m) < int8(n)
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDLconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVQLS)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ m := v_0.AuxInt
+ if !(0 <= int8(m) && int8(m) < int8(n)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMOVQCC _ x (FlagEQ))
+ // match: (CMPBconst (ANDL x y) [0])
// cond:
- // result: x
+ // result: (TESTB x y)
for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagEQ {
+ if v.AuxInt != 0 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDL {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpAMD64TESTB)
v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMOVQCC _ x (FlagGT_UGT))
+ // match: (CMPBconst (ANDLconst [c] x) [0])
// cond:
- // result: x
+ // result: (TESTBconst [int64(int8(c))] x)
for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_UGT {
+ if v.AuxInt != 0 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64TESTBconst)
+ v.AuxInt = int64(int8(c))
v.AddArg(x)
return true
}
- // match: (CMOVQCC y _ (FlagGT_ULT))
+ // match: (CMPBconst x [0])
// cond:
- // result: y
+ // result: (TESTB x x)
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_ULT {
+ if v.AuxInt != 0 {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ x := v.Args[0]
+ v.reset(OpAMD64TESTB)
+ v.AddArg(x)
+ v.AddArg(x)
return true
}
- // match: (CMOVQCC y _ (FlagLT_ULT))
- // cond:
- // result: y
+ // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
+ // result: @l.Block (CMPBconstmem {sym} [makeValAndOff(c,off)] ptr mem)
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_ULT {
+ c := v.AuxInt
+ l := v.Args[0]
+ if l.Op != OpAMD64MOVBload {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
break
}
+ b = l.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstmem, types.TypeFlags)
v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.AddArg(v0)
+ v0.AuxInt = makeValAndOff(c, off)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
- // match: (CMOVQCC _ x (FlagLT_UGT))
- // cond:
- // result: x
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPBmem_0(v *Value) bool {
+ // match: (CMPBmem {sym} [off] ptr (MOVLconst [c]) mem)
+ // cond: validValAndOff(int64(int8(c)),off)
+ // result: (CMPBconstmem {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
for {
+ off := v.AuxInt
+ sym := v.Aux
_ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_UGT {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(validValAndOff(int64(int8(c)), off)) {
+ break
+ }
+ v.reset(OpAMD64CMPBconstmem)
+ v.AuxInt = makeValAndOff(int64(int8(c)), off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64CMOVQCS_0(v *Value) bool {
- // match: (CMOVQCS x y (InvertFlags cond))
+func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPL x (MOVLconst [c]))
// cond:
- // result: (CMOVQHI x y cond)
+ // result: (CMPLconst x [c])
for {
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVQHI)
+ c := v_1.AuxInt
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = c
v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
return true
}
- // match: (CMOVQCS y _ (FlagEQ))
+ // match: (CMPL (MOVLconst [c]) x)
// cond:
- // result: y
+ // result: (InvertFlags (CMPLconst x [c]))
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagEQ {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
- // match: (CMOVQCS y _ (FlagGT_UGT))
- // cond:
- // result: y
+ // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (CMPLmem {sym} [off] ptr x mem)
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_UGT {
+ _ = v.Args[1]
+ l := v.Args[0]
+ if l.Op != OpAMD64MOVLload {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- // match: (CMOVQCS _ x (FlagGT_ULT))
- // cond:
- // result: x
- for {
- _ = v.Args[2]
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_ULT {
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
+ v.reset(OpAMD64CMPLmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
+ v.AddArg(mem)
return true
}
- // match: (CMOVQCS _ x (FlagLT_ULT))
- // cond:
- // result: x
+ // match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (InvertFlags (CMPLmem {sym} [off] ptr x mem))
for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_ULT {
+ _ = v.Args[1]
+ x := v.Args[0]
+ l := v.Args[1]
+ if l.Op != OpAMD64MOVLload {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
-	// match: (CMOVQCS y _ (FlagLT_UGT))
- // cond:
- // result: y
- for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
-		if v_2.Op != OpAMD64FlagLT_UGT {
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLmem, types.TypeFlags)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(x)
+ v0.AddArg(mem)
+ v.AddArg(v0)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
- // match: (CMOVQEQ x y (InvertFlags cond))
- // cond:
- // result: (CMOVQEQ x y cond)
+func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool {
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVQEQ)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
- return true
- }
- // match: (CMOVQEQ _ x (FlagEQ))
- // cond:
- // result: x
- for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagEQ {
+ x := v_0.AuxInt
+ if !(int32(x) == int32(y)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.reset(OpAMD64FlagEQ)
return true
}
- // match: (CMOVQEQ y _ (FlagGT_UGT))
- // cond:
- // result: y
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_UGT {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- // match: (CMOVQEQ y _ (FlagGT_ULT))
- // cond:
- // result: y
- for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_ULT {
+ x := v_0.AuxInt
+ if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMOVQEQ y _ (FlagLT_ULT))
- // cond:
- // result: y
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
+ // result: (FlagLT_UGT)
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_ULT {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ x := v_0.AuxInt
+ if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
return true
}
-	// match: (CMOVQEQ y _ (FlagLT_UGT))
- // cond:
- // result: y
- for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
-		if v_2.Op != OpAMD64FlagLT_UGT {
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ x := v_0.AuxInt
+ if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
return true
}
- // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
- // cond: c != 0
- // result: x
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
+ // result: (FlagGT_UGT)
for {
- _ = v.Args[2]
- x := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpSelect1 {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- v_2_0 := v_2.Args[0]
- if v_2_0.Op != OpAMD64BSFQ {
+ x := v_0.AuxInt
+ if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
break
}
- v_2_0_0 := v_2_0.Args[0]
- if v_2_0_0.Op != OpAMD64ORQconst {
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPLconst (SHRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHRLconst {
break
}
- c := v_2_0_0.AuxInt
- if !(c != 0) {
+ c := v_0.AuxInt
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVQGE_0(v *Value) bool {
- // match: (CMOVQGE x y (InvertFlags cond))
- // cond:
- // result: (CMOVQLE x y cond)
+ // match: (CMPLconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int32(m) && int32(m) < int32(n)
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDLconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVQLE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ m := v_0.AuxInt
+ if !(0 <= int32(m) && int32(m) < int32(n)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMOVQGE _ x (FlagEQ))
+ // match: (CMPLconst (ANDL x y) [0])
// cond:
- // result: x
+ // result: (TESTL x y)
for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagEQ {
+ if v.AuxInt != 0 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDL {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpAMD64TESTL)
v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMOVQGE _ x (FlagGT_UGT))
+ // match: (CMPLconst (ANDLconst [c] x) [0])
// cond:
- // result: x
+ // result: (TESTLconst [c] x)
for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_UGT {
+ if v.AuxInt != 0 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64TESTLconst)
+ v.AuxInt = c
v.AddArg(x)
return true
}
- // match: (CMOVQGE _ x (FlagGT_ULT))
+ // match: (CMPLconst x [0])
// cond:
- // result: x
+ // result: (TESTL x x)
for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_ULT {
+ if v.AuxInt != 0 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
+ x := v.Args[0]
+ v.reset(OpAMD64TESTL)
+ v.AddArg(x)
v.AddArg(x)
return true
}
- // match: (CMOVQGE y _ (FlagLT_ULT))
- // cond:
- // result: y
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPLconst_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
+ // result: @l.Block (CMPLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_ULT {
+ c := v.AuxInt
+ l := v.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
break
}
+ b = l.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstmem, types.TypeFlags)
v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.AddArg(v0)
+ v0.AuxInt = makeValAndOff(c, off)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
-	// match: (CMOVQGE y _ (FlagLT_UGT))
- // cond:
- // result: y
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPLmem_0(v *Value) bool {
+ // match: (CMPLmem {sym} [off] ptr (MOVLconst [c]) mem)
+ // cond: validValAndOff(c,off)
+ // result: (CMPLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
for {
+ off := v.AuxInt
+ sym := v.Aux
_ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
-		if v_2.Op != OpAMD64FlagLT_UGT {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(validValAndOff(c, off)) {
+ break
+ }
+ v.reset(OpAMD64CMPLconstmem)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64CMOVQGT_0(v *Value) bool {
- // match: (CMOVQGT x y (InvertFlags cond))
- // cond:
- // result: (CMOVQLT x y cond)
+func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (CMPQconst x [c])
for {
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(is32Bit(c)) {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVQLT)
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = c
v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
return true
}
- // match: (CMOVQGT y _ (FlagEQ))
- // cond:
- // result: y
+ // match: (CMPQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (InvertFlags (CMPQconst x [c]))
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagEQ {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- // match: (CMOVQGT _ x (FlagGT_UGT))
- // cond:
- // result: x
- for {
- _ = v.Args[2]
+ c := v_0.AuxInt
x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_UGT {
+ if !(is32Bit(c)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
- // match: (CMOVQGT _ x (FlagGT_ULT))
- // cond:
- // result: x
+ // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (CMPQmem {sym} [off] ptr x mem)
for {
- _ = v.Args[2]
+ _ = v.Args[1]
+ l := v.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_ULT {
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
+ v.reset(OpAMD64CMPQmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
+ v.AddArg(mem)
return true
}
- // match: (CMOVQGT y _ (FlagLT_ULT))
- // cond:
- // result: y
+ // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (InvertFlags (CMPQmem {sym} [off] ptr x mem))
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_ULT {
+ _ = v.Args[1]
+ x := v.Args[0]
+ l := v.Args[1]
+ if l.Op != OpAMD64MOVQload {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
-	// match: (CMOVQGT y _ (FlagLT_UGT))
- // cond:
- // result: y
- for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
-		if v_2.Op != OpAMD64FlagLT_UGT {
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQmem, types.TypeFlags)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(x)
+ v0.AddArg(mem)
+ v.AddArg(v0)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64CMOVQHI_0(v *Value) bool {
- // match: (CMOVQHI x y (InvertFlags cond))
+func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool {
+ // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
// cond:
- // result: (CMOVQCS x y cond)
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ if v.AuxInt != 32 {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVQCS)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
- return true
- }
- // match: (CMOVQHI y _ (FlagEQ))
- // cond:
- // result: y
- for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagEQ {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64NEGQ {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- // match: (CMOVQHI _ x (FlagGT_UGT))
- // cond:
- // result: x
- for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_UGT {
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64ADDQconst {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ if v_0_0.AuxInt != -16 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ if v_0_0_0.AuxInt != 15 {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMOVQHI y _ (FlagGT_ULT))
+ // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
// cond:
- // result: y
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_ULT {
+ if v.AuxInt != 32 {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64NEGQ {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ if v_0_0.AuxInt != -8 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ if v_0_0_0.AuxInt != 7 {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMOVQHI y _ (FlagLT_ULT))
- // cond:
- // result: y
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_ULT {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ x := v_0.AuxInt
+ if !(x == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
return true
}
- // match: (CMOVQHI _ x (FlagLT_UGT))
- // cond:
- // result: x
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x<y && uint64(x)<uint64(y)
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_UGT {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ x := v_0.AuxInt
+ if !(x < y && uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVQLE_0(v *Value) bool {
- // match: (CMOVQLE x y (InvertFlags cond))
- // cond:
- // result: (CMOVQGE x y cond)
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x<y && uint64(x)>uint64(y)
+ // result: (FlagLT_UGT)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVQGE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ x := v_0.AuxInt
+ if !(x < y && uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
return true
}
- // match: (CMOVQLE _ x (FlagEQ))
- // cond:
- // result: x
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x>y && uint64(x)<uint64(y)
+ // result: (FlagGT_ULT)
for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagEQ {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ x := v_0.AuxInt
+ if !(x > y && uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
return true
}
- // match: (CMOVQLE y _ (FlagGT_UGT))
- // cond:
- // result: y
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x>y && uint64(x)>uint64(y)
+ // result: (FlagGT_UGT)
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_UGT {
+ y := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ x := v_0.AuxInt
+ if !(x > y && uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
return true
}
- // match: (CMOVQLE y _ (FlagGT_ULT))
- // cond:
- // result: y
+ // match: (CMPQconst (MOVBQZX _) [c])
+ // cond: 0xFF < c
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_ULT {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVBQZX {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
+ if !(0xFF < c) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMOVQLE _ x (FlagLT_ULT))
- // cond:
- // result: x
+ // match: (CMPQconst (MOVWQZX _) [c])
+ // cond: 0xFFFF < c
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_ULT {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVWQZX {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ if !(0xFFFF < c) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMOVQLE _ x (FlagLT_UGT))
- // cond:
- // result: x
+ // match: (CMPQconst (MOVLQZX _) [c])
+ // cond: 0xFFFFFFFF < c
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_UGT {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLQZX {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ if !(0xFFFFFFFF < c) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64CMOVQLS_0(v *Value) bool {
- // match: (CMOVQLS x y (InvertFlags cond))
- // cond:
- // result: (CMOVQCC x y cond)
+func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (CMPQconst (SHRQconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
- break
- }
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVQCC)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
- return true
- }
- // match: (CMOVQLS _ x (FlagEQ))
- // cond:
- // result: x
- for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagEQ {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (CMOVQLS y _ (FlagGT_UGT))
- // cond:
- // result: y
- for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_UGT {
- break
- }
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- // match: (CMOVQLS _ x (FlagGT_ULT))
- // cond:
- // result: x
- for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_ULT {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (CMOVQLS _ x (FlagLT_ULT))
- // cond:
- // result: x
- for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_ULT {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
-	// match: (CMOVQLS y _ (FlagLT_UGT))
- // cond:
- // result: y
- for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
-		if v_2.Op != OpAMD64FlagLT_UGT {
- break
- }
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVQLT_0(v *Value) bool {
- // match: (CMOVQLT x y (InvertFlags cond))
- // cond:
- // result: (CMOVQGT x y cond)
- for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
- break
- }
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVQGT)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
- return true
- }
- // match: (CMOVQLT y _ (FlagEQ))
- // cond:
- // result: y
- for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagEQ {
- break
- }
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- // match: (CMOVQLT y _ (FlagGT_UGT))
- // cond:
- // result: y
- for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_UGT {
- break
- }
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- // match: (CMOVQLT y _ (FlagGT_ULT))
- // cond:
- // result: y
- for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_ULT {
- break
- }
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- // match: (CMOVQLT _ x (FlagLT_ULT))
- // cond:
- // result: x
- for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_ULT {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (CMOVQLT _ x (FlagLT_UGT))
- // cond:
- // result: x
- for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_UGT {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHRQconst {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVQNE_0(v *Value) bool {
- // match: (CMOVQNE x y (InvertFlags cond))
- // cond:
- // result: (CMOVQNE x y cond)
- for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ c := v_0.AuxInt
+ if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVQNE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMOVQNE y _ (FlagEQ))
- // cond:
- // result: y
+ // match: (CMPQconst (ANDQconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- y := v.Args[0]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagEQ {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDQconst {
break
}
- v.reset(OpCopy)
- v.Type = y.Type
- v.AddArg(y)
- return true
- }
- // match: (CMOVQNE _ x (FlagGT_UGT))
- // cond:
- // result: x
- for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_UGT {
+ m := v_0.AuxInt
+ if !(0 <= m && m < n) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMOVQNE _ x (FlagGT_ULT))
- // cond:
- // result: x
+ // match: (CMPQconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagGT_ULT {
+ n := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDLconst {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (CMOVQNE _ x (FlagLT_ULT))
- // cond:
- // result: x
- for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_ULT {
+ m := v_0.AuxInt
+ if !(0 <= m && m < n) {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMOVQNE _ x (FlagLT_UGT))
+ // match: (CMPQconst (ANDQ x y) [0])
// cond:
- // result: x
+ // result: (TESTQ x y)
for {
- _ = v.Args[2]
- x := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64FlagLT_UGT {
+ if v.AuxInt != 0 {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVWCC_0(v *Value) bool {
- // match: (CMOVWCC x y (InvertFlags cond))
- // cond:
- // result: (CMOVWLS x y cond)
- for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDQ {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVWLS)
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpAMD64TESTQ)
v.AddArg(x)
v.AddArg(y)
- v.AddArg(cond)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVWCS_0(v *Value) bool {
- // match: (CMOVWCS x y (InvertFlags cond))
+ // match: (CMPQconst (ANDQconst [c] x) [0])
// cond:
- // result: (CMOVWHI x y cond)
+ // result: (TESTQconst [c] x)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ if v.AuxInt != 0 {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVWHI)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVWEQ_0(v *Value) bool {
- // match: (CMOVWEQ x y (InvertFlags cond))
- // cond:
- // result: (CMOVWEQ x y cond)
- for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDQconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVWEQ)
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64TESTQconst)
+ v.AuxInt = c
v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVWGE_0(v *Value) bool {
- // match: (CMOVWGE x y (InvertFlags cond))
+ // match: (CMPQconst x [0])
// cond:
- // result: (CMOVWLE x y cond)
+ // result: (TESTQ x x)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ if v.AuxInt != 0 {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVWLE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVWGT_0(v *Value) bool {
- // match: (CMOVWGT x y (InvertFlags cond))
- // cond:
- // result: (CMOVWLT x y cond)
- for {
- _ = v.Args[2]
x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
- break
- }
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVWLT)
+ v.reset(OpAMD64TESTQ)
v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVWHI_0(v *Value) bool {
- // match: (CMOVWHI x y (InvertFlags cond))
- // cond:
- // result: (CMOVWCS x y cond)
- for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
- break
- }
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVWCS)
v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVWLE_0(v *Value) bool {
- // match: (CMOVWLE x y (InvertFlags cond))
- // cond:
- // result: (CMOVWGE x y cond)
+ // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
+ // result: @l.Block (CMPQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ c := v.AuxInt
+ l := v.Args[0]
+ if l.Op != OpAMD64MOVQload {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVWGE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVWLS_0(v *Value) bool {
- // match: (CMOVWLS x y (InvertFlags cond))
- // cond:
- // result: (CMOVWCC x y cond)
- for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVWCC)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ b = l.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstmem, types.TypeFlags)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = makeValAndOff(c, off)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64CMOVWLT_0(v *Value) bool {
- // match: (CMOVWLT x y (InvertFlags cond))
- // cond:
- // result: (CMOVWGT x y cond)
+func rewriteValueAMD64_OpAMD64CMPQmem_0(v *Value) bool {
+ // match: (CMPQmem {sym} [off] ptr (MOVQconst [c]) mem)
+ // cond: validValAndOff(c,off)
+ // result: (CMPQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
for {
+ off := v.AuxInt
+ sym := v.Aux
_ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVWGT)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64CMOVWNE_0(v *Value) bool {
- // match: (CMOVWNE x y (InvertFlags cond))
- // cond:
- // result: (CMOVWNE x y cond)
- for {
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64InvertFlags {
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(validValAndOff(c, off)) {
break
}
- cond := v_2.Args[0]
- v.reset(OpAMD64CMOVWNE)
- v.AddArg(x)
- v.AddArg(y)
- v.AddArg(cond)
+ v.reset(OpAMD64CMPQconstmem)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool {
b := v.Block
_ = b
- // match: (CMPB x (MOVLconst [c]))
+ // match: (CMPW x (MOVLconst [c]))
// cond:
- // result: (CMPBconst x [int64(int8(c))])
+ // result: (CMPWconst x [int64(int16(c))])
for {
_ = v.Args[1]
x := v.Args[0]
break
}
c := v_1.AuxInt
- v.reset(OpAMD64CMPBconst)
- v.AuxInt = int64(int8(c))
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int64(int16(c))
v.AddArg(x)
return true
}
- // match: (CMPB (MOVLconst [c]) x)
+ // match: (CMPW (MOVLconst [c]) x)
// cond:
- // result: (InvertFlags (CMPBconst x [int64(int8(c))]))
+ // result: (InvertFlags (CMPWconst x [int64(int16(c))]))
for {
_ = v.Args[1]
v_0 := v.Args[0]
c := v_0.AuxInt
x := v.Args[1]
v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v0.AuxInt = int64(int8(c))
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int64(int16(c))
v0.AddArg(x)
v.AddArg(v0)
return true
}
- // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
+ // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (CMPBmem {sym} [off] ptr x mem)
+ // result: (CMPWmem {sym} [off] ptr x mem)
for {
_ = v.Args[1]
l := v.Args[0]
- if l.Op != OpAMD64MOVBload {
+ if l.Op != OpAMD64MOVWload {
break
}
off := l.AuxInt
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64CMPBmem)
+ v.reset(OpAMD64CMPWmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
+ // match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (InvertFlags (CMPBmem {sym} [off] ptr x mem))
+ // result: (InvertFlags (CMPWmem {sym} [off] ptr x mem))
for {
_ = v.Args[1]
x := v.Args[0]
l := v.Args[1]
- if l.Op != OpAMD64MOVBload {
+ if l.Op != OpAMD64MOVWload {
break
}
off := l.AuxInt
break
}
v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPBmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWmem, types.TypeFlags)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
b := v.Block
_ = b
- // match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)==int8(y)
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)==int16(y)
// result: (FlagEQ)
for {
y := v.AuxInt
break
}
x := v_0.AuxInt
- if !(int8(x) == int8(y)) {
+ if !(int16(x) == int16(y)) {
break
}
v.reset(OpAMD64FlagEQ)
return true
}
- // match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)<int8(y) && uint8(x)<uint8(y)
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<int16(y) && uint16(x)<uint16(y)
// result: (FlagLT_ULT)
for {
y := v.AuxInt
break
}
x := v_0.AuxInt
- if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
+ if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
break
}
v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)<int8(y) && uint8(x)>uint8(y)
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<int16(y) && uint16(x)>uint16(y)
// result: (FlagLT_UGT)
for {
y := v.AuxInt
break
}
x := v_0.AuxInt
- if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
+ if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
break
}
v.reset(OpAMD64FlagLT_UGT)
return true
}
- // match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)>int8(y) && uint8(x)<uint8(y)
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>int16(y) && uint16(x)<uint16(y)
// result: (FlagGT_ULT)
for {
y := v.AuxInt
break
}
x := v_0.AuxInt
- if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
+ if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
break
}
v.reset(OpAMD64FlagGT_ULT)
return true
}
- // match: (CMPBconst (MOVLconst [x]) [y])
- // cond: int8(x)>int8(y) && uint8(x)>uint8(y)
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>int16(y) && uint16(x)>uint16(y)
// result: (FlagGT_UGT)
for {
y := v.AuxInt
break
}
x := v_0.AuxInt
- if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
+ if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
break
}
v.reset(OpAMD64FlagGT_UGT)
return true
}
- // match: (CMPBconst (ANDLconst _ [m]) [n])
- // cond: 0 <= int8(m) && int8(m) < int8(n)
+ // match: (CMPWconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int16(m) && int16(m) < int16(n)
// result: (FlagLT_ULT)
for {
n := v.AuxInt
break
}
m := v_0.AuxInt
- if !(0 <= int8(m) && int8(m) < int8(n)) {
+ if !(0 <= int16(m) && int16(m) < int16(n)) {
break
}
v.reset(OpAMD64FlagLT_ULT)
return true
}
- // match: (CMPBconst (ANDL x y) [0])
+ // match: (CMPWconst (ANDL x y) [0])
// cond:
- // result: (TESTB x y)
+ // result: (TESTW x y)
for {
if v.AuxInt != 0 {
break
_ = v_0.Args[1]
x := v_0.Args[0]
y := v_0.Args[1]
- v.reset(OpAMD64TESTB)
+ v.reset(OpAMD64TESTW)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (CMPBconst (ANDLconst [c] x) [0])
+ // match: (CMPWconst (ANDLconst [c] x) [0])
// cond:
- // result: (TESTBconst [int64(int8(c))] x)
+ // result: (TESTWconst [int64(int16(c))] x)
for {
if v.AuxInt != 0 {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- v.reset(OpAMD64TESTBconst)
- v.AuxInt = int64(int8(c))
+ v.reset(OpAMD64TESTWconst)
+ v.AuxInt = int64(int16(c))
v.AddArg(x)
return true
}
- // match: (CMPBconst x [0])
+ // match: (CMPWconst x [0])
// cond:
- // result: (TESTB x x)
+ // result: (TESTW x x)
for {
if v.AuxInt != 0 {
break
}
x := v.Args[0]
- v.reset(OpAMD64TESTB)
+ v.reset(OpAMD64TESTW)
v.AddArg(x)
v.AddArg(x)
return true
}
- // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
+ // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
- // result: @l.Block (CMPBconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ // result: @l.Block (CMPWconstmem {sym} [makeValAndOff(c,off)] ptr mem)
for {
c := v.AuxInt
l := v.Args[0]
- if l.Op != OpAMD64MOVBload {
+ if l.Op != OpAMD64MOVWload {
break
}
off := l.AuxInt
break
}
b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstmem, types.TypeFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstmem, types.TypeFlags)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = makeValAndOff(c, off)
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPBmem_0(v *Value) bool {
- // match: (CMPBmem {sym} [off] ptr (MOVLconst [c]) mem)
- // cond: validValAndOff(int64(int8(c)),off)
- // result: (CMPBconstmem {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
+func rewriteValueAMD64_OpAMD64CMPWmem_0(v *Value) bool {
+ // match: (CMPWmem {sym} [off] ptr (MOVLconst [c]) mem)
+ // cond: validValAndOff(int64(int16(c)),off)
+ // result: (CMPWconstmem {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
for {
off := v.AuxInt
sym := v.Aux
}
c := v_1.AuxInt
mem := v.Args[2]
- if !(validValAndOff(int64(int8(c)), off)) {
+ if !(validValAndOff(int64(int16(c)), off)) {
break
}
- v.reset(OpAMD64CMPBconstmem)
- v.AuxInt = makeValAndOff(int64(int8(c)), off)
+ v.reset(OpAMD64CMPWconstmem)
+ v.AuxInt = makeValAndOff(int64(int16(c)), off)
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (CMPL x (MOVLconst [c]))
- // cond:
- // result: (CMPLconst x [c])
+func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
+ // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
+ // cond: is32Bit(off1+off2)
+ // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
break
}
- c := v_1.AuxInt
- v.reset(OpAMD64CMPLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (CMPL (MOVLconst [c]) x)
- // cond:
- // result: (InvertFlags (CMPLconst x [c]))
- for {
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v0.AuxInt = c
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
- // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (CMPLmem {sym} [off] ptr x mem)
- for {
- _ = v.Args[1]
- l := v.Args[0]
- if l.Op != OpAMD64MOVLload {
- break
- }
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- x := v.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ old := v.Args[1]
+ new_ := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64CMPLmem)
- v.AuxInt = off
+ v.reset(OpAMD64CMPXCHGLlock)
+ v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
- v.AddArg(x)
+ v.AddArg(old)
+ v.AddArg(new_)
v.AddArg(mem)
return true
}
- // match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (InvertFlags (CMPLmem {sym} [off] ptr x mem))
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
+ // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
+ // cond: is32Bit(off1+off2)
+ // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
for {
- _ = v.Args[1]
- x := v.Args[0]
- l := v.Args[1]
- if l.Op != OpAMD64MOVLload {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ old := v.Args[1]
+ new_ := v.Args[2]
+ mem := v.Args[3]
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPLmem, types.TypeFlags)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(x)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v.reset(OpAMD64CMPXCHGQlock)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(old)
+ v.AddArg(new_)
+ v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool {
- // match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)==int32(y)
- // result: (FlagEQ)
+func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
+ // match: (LEAL [c] {s} (ADDLconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (LEAL [c+d] {s} x)
for {
- y := v.AuxInt
+ c := v.AuxInt
+ s := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
+ if v_0.Op != OpAMD64ADDLconst {
break
}
- x := v_0.AuxInt
- if !(int32(x) == int32(y)) {
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
break
}
- v.reset(OpAMD64FlagEQ)
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
return true
}
- // match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
- // result: (FlagLT_ULT)
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
+ // match: (LEAQ [c] {s} (ADDQconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (LEAQ [c+d] {s} x)
for {
- y := v.AuxInt
+ c := v.AuxInt
+ s := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
+ if v_0.Op != OpAMD64ADDQconst {
break
}
- x := v_0.AuxInt
- if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
return true
}
- // match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
- // result: (FlagLT_UGT)
+ // match: (LEAQ [c] {s} (ADDQ x y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAQ1 [c] {s} x y)
for {
- y := v.AuxInt
+ c := v.AuxInt
+ s := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
+ if v_0.Op != OpAMD64ADDQ {
break
}
- x := v_0.AuxInt
- if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(x.Op != OpSB && y.Op != OpSB) {
break
}
- v.reset(OpAMD64FlagLT_UGT)
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
- // result: (FlagGT_ULT)
+ // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
for {
- y := v.AuxInt
+ off1 := v.AuxInt
+ sym1 := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
+ if v_0.Op != OpAMD64LEAQ {
break
}
- x := v_0.AuxInt
- if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64FlagGT_ULT)
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
return true
}
- // match: (CMPLconst (MOVLconst [x]) [y])
- // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
- // result: (FlagGT_UGT)
+ // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
- y := v.AuxInt
+ off1 := v.AuxInt
+ sym1 := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
+ if v_0.Op != OpAMD64LEAQ1 {
break
}
- x := v_0.AuxInt
- if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64FlagGT_UGT)
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPLconst (SHRLconst _ [c]) [n])
- // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
- // result: (FlagLT_ULT)
+ // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
- n := v.AuxInt
+ off1 := v.AuxInt
+ sym1 := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHRLconst {
+ if v_0.Op != OpAMD64LEAQ2 {
break
}
- c := v_0.AuxInt
- if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPLconst (ANDLconst _ [m]) [n])
- // cond: 0 <= int32(m) && int32(m) < int32(n)
- // result: (FlagLT_ULT)
+ // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
- n := v.AuxInt
+ off1 := v.AuxInt
+ sym1 := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDLconst {
+ if v_0.Op != OpAMD64LEAQ4 {
break
}
- m := v_0.AuxInt
- if !(0 <= int32(m) && int32(m) < int32(n)) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPLconst (ANDL x y) [0])
- // cond:
- // result: (TESTL x y)
+ // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
- if v.AuxInt != 0 {
- break
- }
+ off1 := v.AuxInt
+ sym1 := v.Aux
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDL {
+ if v_0.Op != OpAMD64LEAQ8 {
break
}
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
_ = v_0.Args[1]
x := v_0.Args[0]
y := v_0.Args[1]
- v.reset(OpAMD64TESTL)
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (CMPLconst (ANDLconst [c] x) [0])
- // cond:
- // result: (TESTLconst [c] x)
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
+ // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(c+d) && x.Op != OpSB
+ // result: (LEAQ1 [c+d] {s} x y)
for {
- if v.AuxInt != 0 {
- break
- }
+ c := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDLconst {
+ if v_0.Op != OpAMD64ADDQconst {
break
}
- c := v_0.AuxInt
+ d := v_0.AuxInt
x := v_0.Args[0]
- v.reset(OpAMD64TESTLconst)
- v.AuxInt = c
+ y := v.Args[1]
+ if !(is32Bit(c+d) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = c + d
+ v.Aux = s
v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPLconst x [0])
- // cond:
- // result: (TESTL x x)
+ // match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
+ // cond: is32Bit(c+d) && x.Op != OpSB
+ // result: (LEAQ1 [c+d] {s} x y)
for {
- if v.AuxInt != 0 {
+ c := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ y := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
break
}
- x := v.Args[0]
- v.reset(OpAMD64TESTL)
- v.AddArg(x)
+ d := v_1.AuxInt
+ x := v_1.Args[0]
+ if !(is32Bit(c+d) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = c + d
+ v.Aux = s
v.AddArg(x)
+ v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMPLconst_10(v *Value) bool {
- b := v.Block
- _ = b
- // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
- // cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
- // result: @l.Block (CMPLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ // match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
+ // cond:
+ // result: (LEAQ2 [c] {s} x y)
for {
c := v.AuxInt
- l := v.Args[0]
- if l.Op != OpAMD64MOVLload {
+ s := v.Aux
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLQconst {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
+ if v_1.AuxInt != 1 {
break
}
- b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstmem, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = makeValAndOff(c, off)
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMPLmem_0(v *Value) bool {
- // match: (CMPLmem {sym} [off] ptr (MOVLconst [c]) mem)
- // cond: validValAndOff(c,off)
- // result: (CMPLconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+ // match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
+ // cond:
+ // result: (LEAQ2 [c] {s} x y)
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
+ c := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLQconst {
break
}
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(validValAndOff(c, off)) {
+ if v_0.AuxInt != 1 {
break
}
- v.reset(OpAMD64CMPLconstmem)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (CMPQ x (MOVQconst [c]))
- // cond: is32Bit(c)
- // result: (CMPQconst x [c])
+ // match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
+ // cond:
+ // result: (LEAQ4 [c] {s} x y)
for {
+ c := v.AuxInt
+ s := v.Aux
_ = v.Args[1]
x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
+ if v_1.Op != OpAMD64SHLQconst {
break
}
- c := v_1.AuxInt
- if !(is32Bit(c)) {
+ if v_1.AuxInt != 2 {
break
}
- v.reset(OpAMD64CMPQconst)
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ4)
v.AuxInt = c
+ v.Aux = s
v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPQ (MOVQconst [c]) x)
- // cond: is32Bit(c)
- // result: (InvertFlags (CMPQconst x [c]))
+ // match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
+ // cond:
+ // result: (LEAQ4 [c] {s} x y)
for {
+ c := v.AuxInt
+ s := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
+ if v_0.Op != OpAMD64SHLQconst {
break
}
- c := v_0.AuxInt
- x := v.Args[1]
- if !(is32Bit(c)) {
+ if v_0.AuxInt != 2 {
break
}
- v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v0.AuxInt = c
- v0.AddArg(x)
- v.AddArg(v0)
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (CMPQmem {sym} [off] ptr x mem)
+ // match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
+ // cond:
+ // result: (LEAQ8 [c] {s} x y)
for {
+ c := v.AuxInt
+ s := v.Aux
_ = v.Args[1]
- l := v.Args[0]
- if l.Op != OpAMD64MOVQload {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLQconst {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- x := v.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
+ if v_1.AuxInt != 3 {
break
}
- v.reset(OpAMD64CMPQmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = c
+ v.Aux = s
v.AddArg(x)
- v.AddArg(mem)
+ v.AddArg(y)
return true
}
- // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (InvertFlags (CMPQmem {sym} [off] ptr x mem))
+ // match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
+ // cond:
+ // result: (LEAQ8 [c] {s} x y)
for {
+ c := v.AuxInt
+ s := v.Aux
_ = v.Args[1]
- x := v.Args[0]
- l := v.Args[1]
- if l.Op != OpAMD64MOVQload {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLQconst {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
+ if v_0.AuxInt != 3 {
break
}
- v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQmem, types.TypeFlags)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(x)
- v0.AddArg(mem)
- v.AddArg(v0)
+ y := v_0.Args[0]
+ x := v.Args[1]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool {
- // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
- // cond:
- // result: (FlagLT_ULT)
+ // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
- if v.AuxInt != 32 {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64NEGQ {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64ADDQconst {
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ y := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
break
}
- if v_0_0.AuxInt != -16 {
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ x := v_1.Args[0]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64ANDQconst {
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
+ // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(c+d) && x.Op != OpSB
+ // result: (LEAQ2 [c+d] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
break
}
- if v_0_0_0.AuxInt != 15 {
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(c+d) && x.Op != OpSB) {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
- // cond:
- // result: (FlagLT_ULT)
+ // match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
+ // cond: is32Bit(c+2*d) && y.Op != OpSB
+ // result: (LEAQ2 [c+2*d] {s} x y)
for {
- if v.AuxInt != 32 {
+ c := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64NEGQ {
+ d := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(is32Bit(c+2*d) && y.Op != OpSB) {
break
}
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64ADDQconst {
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = c + 2*d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
+ // cond:
+ // result: (LEAQ4 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLQconst {
break
}
- if v_0_0.AuxInt != -8 {
+ if v_1.AuxInt != 1 {
break
}
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64ANDQconst {
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
+ // cond:
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLQconst {
break
}
- if v_0_0_0.AuxInt != 7 {
+ if v_1.AuxInt != 2 {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPQconst (MOVQconst [x]) [y])
- // cond: x==y
- // result: (FlagEQ)
+ // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
- y := v.AuxInt
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
+ if v_0.Op != OpAMD64LEAQ {
break
}
- x := v_0.AuxInt
- if !(x == y) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
- v.reset(OpAMD64FlagEQ)
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPQconst (MOVQconst [x]) [y])
- // cond: x<y && uint64(x)<uint64(y)
- // result: (FlagLT_ULT)
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
+ // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(c+d) && x.Op != OpSB
+ // result: (LEAQ4 [c+d] {s} x y)
for {
- y := v.AuxInt
+ c := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
+ if v_0.Op != OpAMD64ADDQconst {
break
}
- x := v_0.AuxInt
- if !(x < y && uint64(x) < uint64(y)) {
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(c+d) && x.Op != OpSB) {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPQconst (MOVQconst [x]) [y])
- // cond: x<y && uint64(x)>uint64(y)
- // result: (FlagLT_UGT)
+ // match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
+ // cond: is32Bit(c+4*d) && y.Op != OpSB
+ // result: (LEAQ4 [c+4*d] {s} x y)
for {
- y := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
+ c := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
break
}
- x := v_0.AuxInt
- if !(x < y && uint64(x) > uint64(y)) {
+ d := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(is32Bit(c+4*d) && y.Op != OpSB) {
break
}
- v.reset(OpAMD64FlagLT_UGT)
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = c + 4*d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPQconst (MOVQconst [x]) [y])
- // cond: x>y && uint64(x)<uint64(y)
- // result: (FlagGT_ULT)
+ // match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
+ // cond:
+ // result: (LEAQ8 [c] {s} x y)
for {
- y := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
+ c := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLQconst {
break
}
- x := v_0.AuxInt
- if !(x > y && uint64(x) < uint64(y)) {
+ if v_1.AuxInt != 1 {
break
}
- v.reset(OpAMD64FlagGT_ULT)
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPQconst (MOVQconst [x]) [y])
- // cond: x>y && uint64(x)>uint64(y)
- // result: (FlagGT_UGT)
+ // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
- y := v.AuxInt
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
+ if v_0.Op != OpAMD64LEAQ {
break
}
- x := v_0.AuxInt
- if !(x > y && uint64(x) > uint64(y)) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
- v.reset(OpAMD64FlagGT_UGT)
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPQconst (MOVBQZX _) [c])
- // cond: 0xFF < c
- // result: (FlagLT_ULT)
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
+ // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(c+d) && x.Op != OpSB
+ // result: (LEAQ8 [c+d] {s} x y)
for {
c := v.AuxInt
+ s := v.Aux
+ _ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBQZX {
+ if v_0.Op != OpAMD64ADDQconst {
break
}
- if !(0xFF < c) {
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(c+d) && x.Op != OpSB) {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = c + d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPQconst (MOVWQZX _) [c])
- // cond: 0xFFFF < c
- // result: (FlagLT_ULT)
+ // match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
+ // cond: is32Bit(c+8*d) && y.Op != OpSB
+ // result: (LEAQ8 [c+8*d] {s} x y)
for {
c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVWQZX {
+ s := v.Aux
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
break
}
- if !(0xFFFF < c) {
+ d := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(is32Bit(c+8*d) && y.Op != OpSB) {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = c + 8*d
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (CMPQconst (MOVLQZX _) [c])
- // cond: 0xFFFFFFFF < c
- // result: (FlagLT_ULT)
+ // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
for {
- c := v.AuxInt
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLQZX {
+ if v_0.Op != OpAMD64LEAQ {
break
}
- if !(0xFFFFFFFF < c) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ x := v_0.Args[0]
+ y := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
+func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
b := v.Block
_ = b
- // match: (CMPQconst (SHRQconst _ [c]) [n])
- // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
- // result: (FlagLT_ULT)
+ // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
for {
- n := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHRQconst {
+ x := v.Args[0]
+ if x.Op != OpAMD64MOVBload {
break
}
- c := v_0.AuxInt
- if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
+ off := x.AuxInt
+ sym := x.Aux
+ _ = x.Args[1]
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
- // match: (CMPQconst (ANDQconst _ [m]) [n])
- // cond: 0 <= m && m < n
- // result: (FlagLT_ULT)
+ // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
for {
- n := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDQconst {
+ x := v.Args[0]
+ if x.Op != OpAMD64MOVWload {
break
}
- m := v_0.AuxInt
- if !(0 <= m && m < n) {
+ off := x.AuxInt
+ sym := x.Aux
+ _ = x.Args[1]
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
- // match: (CMPQconst (ANDLconst _ [m]) [n])
- // cond: 0 <= m && m < n
- // result: (FlagLT_ULT)
+ // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
for {
- n := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDLconst {
+ x := v.Args[0]
+ if x.Op != OpAMD64MOVLload {
break
}
- m := v_0.AuxInt
- if !(0 <= m && m < n) {
+ off := x.AuxInt
+ sym := x.Aux
+ _ = x.Args[1]
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
- // match: (CMPQconst (ANDQ x y) [0])
- // cond:
- // result: (TESTQ x y)
+ // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
for {
- if v.AuxInt != 0 {
+ x := v.Args[0]
+ if x.Op != OpAMD64MOVQload {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDQ {
+ off := x.AuxInt
+ sym := x.Aux
+ _ = x.Args[1]
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
break
}
- _ = v_0.Args[1]
- x := v_0.Args[0]
- y := v_0.Args[1]
- v.reset(OpAMD64TESTQ)
- v.AddArg(x)
- v.AddArg(y)
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
- // match: (CMPQconst (ANDQconst [c] x) [0])
- // cond:
- // result: (TESTQconst [c] x)
+ // match: (MOVBQSX (ANDLconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDLconst [c & 0x7f] x)
for {
- if v.AuxInt != 0 {
- break
- }
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDQconst {
+ if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- v.reset(OpAMD64TESTQconst)
- v.AuxInt = c
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = c & 0x7f
v.AddArg(x)
return true
}
- // match: (CMPQconst x [0])
+ // match: (MOVBQSX (MOVBQSX x))
// cond:
- // result: (TESTQ x x)
+ // result: (MOVBQSX x)
for {
- if v.AuxInt != 0 {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVBQSX {
break
}
- x := v.Args[0]
- v.reset(OpAMD64TESTQ)
- v.AddArg(x)
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQSX)
v.AddArg(x)
return true
}
- // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
- // cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
- // result: @l.Block (CMPQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
- for {
- c := v.AuxInt
- l := v.Args[0]
- if l.Op != OpAMD64MOVQload {
- break
- }
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
- break
- }
- b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstmem, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = makeValAndOff(c, off)
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- return true
- }
return false
}
-func rewriteValueAMD64_OpAMD64CMPQmem_0(v *Value) bool {
- // match: (CMPQmem {sym} [off] ptr (MOVQconst [c]) mem)
- // cond: validValAndOff(c,off)
- // result: (CMPQconstmem {sym} [makeValAndOff(c,off)] ptr mem)
+func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
+ // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBQSX x)
for {
off := v.AuxInt
sym := v.Aux
- _ = v.Args[2]
+ _ = v.Args[1]
ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(validValAndOff(c, off)) {
+ if v_1.Op != OpAMD64MOVBstore {
break
}
- v.reset(OpAMD64CMPQconstmem)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (CMPW x (MOVLconst [c]))
- // cond:
- // result: (CMPWconst x [int64(int16(c))])
- for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ _ = v_1.Args[2]
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
- c := v_1.AuxInt
- v.reset(OpAMD64CMPWconst)
- v.AuxInt = int64(int16(c))
+ v.reset(OpAMD64MOVBQSX)
v.AddArg(x)
return true
}
- // match: (CMPW (MOVLconst [c]) x)
- // cond:
- // result: (InvertFlags (CMPWconst x [int64(int16(c))]))
+ // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
_ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v0.AuxInt = int64(int16(c))
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
- // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (CMPWmem {sym} [off] ptr x mem)
- for {
- _ = v.Args[1]
- l := v.Args[0]
- if l.Op != OpAMD64MOVWload {
+ if v_0.Op != OpAMD64LEAQ {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- x := v.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64CMPWmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
+ v.reset(OpAMD64MOVBQSXload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (InvertFlags (CMPWmem {sym} [off] ptr x mem))
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
for {
- _ = v.Args[1]
x := v.Args[0]
- l := v.Args[1]
- if l.Op != OpAMD64MOVWload {
+ if x.Op != OpAMD64MOVBload {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
+ off := x.AuxInt
+ sym := x.Aux
+ _ = x.Args[1]
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
break
}
- v.reset(OpAMD64InvertFlags)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPWmem, types.TypeFlags)
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
- v0.AddArg(x)
v0.AddArg(mem)
- v.AddArg(v0)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)==int16(y)
- // result: (FlagEQ)
- for {
- y := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- x := v_0.AuxInt
- if !(int16(x) == int16(y)) {
- break
- }
- v.reset(OpAMD64FlagEQ)
- return true
- }
- // match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)<int16(y) && uint16(x)<uint16(y)
- // result: (FlagLT_ULT)
- for {
- y := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- x := v_0.AuxInt
- if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
- break
- }
- v.reset(OpAMD64FlagLT_ULT)
- return true
- }
- // match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)<int16(y) && uint16(x)>uint16(y)
- // result: (FlagLT_UGT)
- for {
- y := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- x := v_0.AuxInt
- if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
- break
- }
- v.reset(OpAMD64FlagLT_UGT)
return true
}
- // match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)>int16(y) && uint16(x)<uint16(y)
- // result: (FlagGT_ULT)
+ // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
for {
- y := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
+ x := v.Args[0]
+ if x.Op != OpAMD64MOVWload {
break
}
- x := v_0.AuxInt
- if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
+ off := x.AuxInt
+ sym := x.Aux
+ _ = x.Args[1]
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
break
}
- v.reset(OpAMD64FlagGT_ULT)
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
- // match: (CMPWconst (MOVLconst [x]) [y])
- // cond: int16(x)>int16(y) && uint16(x)>uint16(y)
- // result: (FlagGT_UGT)
+ // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
for {
- y := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
+ x := v.Args[0]
+ if x.Op != OpAMD64MOVLload {
break
}
- x := v_0.AuxInt
- if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
+ off := x.AuxInt
+ sym := x.Aux
+ _ = x.Args[1]
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
break
}
- v.reset(OpAMD64FlagGT_UGT)
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
- // match: (CMPWconst (ANDLconst _ [m]) [n])
- // cond: 0 <= int16(m) && int16(m) < int16(n)
- // result: (FlagLT_ULT)
+ // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
for {
- n := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDLconst {
+ x := v.Args[0]
+ if x.Op != OpAMD64MOVQload {
break
}
- m := v_0.AuxInt
- if !(0 <= int16(m) && int16(m) < int16(n)) {
+ off := x.AuxInt
+ sym := x.Aux
+ _ = x.Args[1]
+ ptr := x.Args[0]
+ mem := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
break
}
- v.reset(OpAMD64FlagLT_ULT)
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
- // match: (CMPWconst (ANDL x y) [0])
- // cond:
- // result: (TESTW x y)
+ // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
for {
- if v.AuxInt != 0 {
+ x := v.Args[0]
+ if x.Op != OpAMD64MOVBloadidx1 {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDL {
+ off := x.AuxInt
+ sym := x.Aux
+ _ = x.Args[2]
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ mem := x.Args[2]
+ if !(x.Uses == 1 && clobber(x)) {
break
}
- _ = v_0.Args[1]
- x := v_0.Args[0]
- y := v_0.Args[1]
- v.reset(OpAMD64TESTW)
- v.AddArg(x)
- v.AddArg(y)
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(idx)
+ v0.AddArg(mem)
return true
}
- // match: (CMPWconst (ANDLconst [c] x) [0])
+ // match: (MOVBQZX (ANDLconst [c] x))
// cond:
- // result: (TESTWconst [int64(int16(c))] x)
+ // result: (ANDLconst [c & 0xff] x)
for {
- if v.AuxInt != 0 {
- break
- }
v_0 := v.Args[0]
if v_0.Op != OpAMD64ANDLconst {
break
}
c := v_0.AuxInt
x := v_0.Args[0]
- v.reset(OpAMD64TESTWconst)
- v.AuxInt = int64(int16(c))
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = c & 0xff
v.AddArg(x)
return true
}
- // match: (CMPWconst x [0])
+ // match: (MOVBQZX (MOVBQZX x))
// cond:
- // result: (TESTW x x)
+ // result: (MOVBQZX x)
for {
- if v.AuxInt != 0 {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVBQZX {
break
}
- x := v.Args[0]
- v.reset(OpAMD64TESTW)
- v.AddArg(x)
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQZX)
v.AddArg(x)
return true
}
- // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
- // cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
- // result: @l.Block (CMPWconstmem {sym} [makeValAndOff(c,off)] ptr mem)
- for {
- c := v.AuxInt
- l := v.Args[0]
- if l.Op != OpAMD64MOVWload {
- break
- }
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
- break
- }
- b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstmem, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = makeValAndOff(c, off)
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- return true
- }
return false
}
-func rewriteValueAMD64_OpAMD64CMPWmem_0(v *Value) bool {
- // match: (CMPWmem {sym} [off] ptr (MOVLconst [c]) mem)
- // cond: validValAndOff(int64(int16(c)),off)
- // result: (CMPWconstmem {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
+func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBQZX x)
for {
off := v.AuxInt
sym := v.Aux
- _ = v.Args[2]
+ _ = v.Args[1]
ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
+ if v_1.Op != OpAMD64MOVBstore {
break
}
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(validValAndOff(int64(int16(c)), off)) {
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ _ = v_1.Args[2]
+ ptr2 := v_1.Args[0]
+ x := v_1.Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
break
}
- v.reset(OpAMD64CMPWconstmem)
- v.AuxInt = makeValAndOff(int64(int16(c)), off)
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
- // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
+ // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
- // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
- _ = v.Args[3]
+ _ = v.Args[1]
v_0 := v.Args[0]
if v_0.Op != OpAMD64ADDQconst {
break
}
off2 := v_0.AuxInt
ptr := v_0.Args[0]
- old := v.Args[1]
- new_ := v.Args[2]
- mem := v.Args[3]
+ mem := v.Args[1]
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64CMPXCHGLlock)
+ v.reset(OpAMD64MOVBload)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(ptr)
- v.AddArg(old)
- v.AddArg(new_)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
- // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
- // cond: is32Bit(off1+off2)
- // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
+ // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[3]
+ sym1 := v.Aux
+ _ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQconst {
+ if v_0.Op != OpAMD64LEAQ {
break
}
off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- old := v.Args[1]
- new_ := v.Args[2]
- mem := v.Args[3]
- if !(is32Bit(off1 + off2)) {
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64CMPXCHGQlock)
+ v.reset(OpAMD64MOVBload)
v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(old)
- v.AddArg(new_)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
- // match: (LEAL [c] {s} (ADDLconst [d] x))
- // cond: is32Bit(c+d)
- // result: (LEAL [c+d] {s} x)
+ // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
for {
- c := v.AuxInt
- s := v.Aux
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDLconst {
+ if v_0.Op != OpAMD64LEAQ1 {
break
}
- d := v_0.AuxInt
- x := v_0.Args[0]
- if !(is32Bit(c + d)) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ _ = v_0.Args[1]
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64LEAL)
- v.AuxInt = c + d
- v.Aux = s
- v.AddArg(x)
+ v.reset(OpAMD64MOVBloadidx1)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
- // match: (LEAQ [c] {s} (ADDQconst [d] x))
- // cond: is32Bit(c+d)
- // result: (LEAQ [c+d] {s} x)
+ // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVBloadidx1 [off] {sym} ptr idx mem)
for {
- c := v.AuxInt
- s := v.Aux
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- if !(is32Bit(c + d)) {
- break
- }
- v.reset(OpAMD64LEAQ)
- v.AuxInt = c + d
- v.Aux = s
- v.AddArg(x)
- return true
- }
- // match: (LEAQ [c] {s} (ADDQ x y))
- // cond: x.Op != OpSB && y.Op != OpSB
- // result: (LEAQ1 [c] {s} x y)
- for {
- c := v.AuxInt
- s := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQ {
+ if v_0.Op != OpAMD64ADDQ {
break
}
_ = v_0.Args[1]
- x := v_0.Args[0]
- y := v_0.Args[1]
- if !(x.Op != OpSB && y.Op != OpSB) {
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ mem := v.Args[1]
+ if !(ptr.Op != OpSB) {
break
}
- v.reset(OpAMD64LEAQ1)
- v.AuxInt = c
- v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpAMD64MOVBloadidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
+ // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
+ _ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ {
+ if v_0.Op != OpAMD64LEAL {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
- x := v_0.Args[0]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
break
}
- v.reset(OpAMD64LEAQ)
+ v.reset(OpAMD64MOVBload)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
+ v.AddArg(base)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
- sym1 := v.Aux
+ sym := v.Aux
+ _ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ1 {
+ if v_0.Op != OpAMD64ADDLconst {
break
}
off2 := v_0.AuxInt
- sym2 := v_0.Aux
- _ = v_0.Args[1]
- x := v_0.Args[0]
- y := v_0.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ ptr := v_0.Args[0]
+ mem := v.Args[1]
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64LEAQ1)
+ v.reset(OpAMD64MOVBload)
v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
+ // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
+ // cond: is32Bit(c+d)
+ // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ c := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ2 {
+ if v_0.Op != OpAMD64ADDQconst {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- _ = v_0.Args[1]
- x := v_0.Args[0]
- y := v_0.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ d := v_0.AuxInt
+ ptr := v_0.Args[0]
+ idx := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(c + d)) {
break
}
- v.reset(OpAMD64LEAQ2)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpAMD64MOVBloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
+ // cond: is32Bit(c+d)
+ // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ4 {
+ c := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ idx := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- _ = v_0.Args[1]
- x := v_0.Args[0]
- y := v_0.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ d := v_1.AuxInt
+ ptr := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(c + d)) {
break
}
- v.reset(OpAMD64LEAQ4)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpAMD64MOVBloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
+ // cond: is32Bit(c+d)
+ // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ8 {
+ c := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- _ = v_0.Args[1]
- x := v_0.Args[0]
- y := v_0.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ d := v_1.AuxInt
+ idx := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(c + d)) {
break
}
- v.reset(OpAMD64LEAQ8)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpAMD64MOVBloadidx1)
+ v.AuxInt = c + d
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
- // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
- // cond: is32Bit(c+d) && x.Op != OpSB
- // result: (LEAQ1 [c+d] {s} x y)
+ // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
+ // cond: is32Bit(c+d)
+ // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
for {
c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
+ sym := v.Aux
+ _ = v.Args[2]
v_0 := v.Args[0]
if v_0.Op != OpAMD64ADDQconst {
break
}
d := v_0.AuxInt
- x := v_0.Args[0]
- y := v.Args[1]
- if !(is32Bit(c+d) && x.Op != OpSB) {
+ idx := v_0.Args[0]
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(c + d)) {
break
}
- v.reset(OpAMD64LEAQ1)
+ v.reset(OpAMD64MOVBloadidx1)
v.AuxInt = c + d
- v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ1 [c] {s} y (ADDQconst [d] x))
- // cond: is32Bit(c+d) && x.Op != OpSB
- // result: (LEAQ1 [c+d] {s} x y)
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool {
+ // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
+ // cond: y.Uses == 1
+ // result: (SETLmem [off] {sym} ptr x mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- y := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64SETL {
break
}
- d := v_1.AuxInt
- x := v_1.Args[0]
- if !(is32Bit(c+d) && x.Op != OpSB) {
+ x := y.Args[0]
+ mem := v.Args[2]
+ if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64LEAQ1)
- v.AuxInt = c + d
- v.Aux = s
+ v.reset(OpAMD64SETLmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
- // cond:
- // result: (LEAQ2 [c] {s} x y)
+ // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETLEmem [off] {sym} ptr x mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHLQconst {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64SETLE {
break
}
- if v_1.AuxInt != 1 {
+ x := y.Args[0]
+ mem := v.Args[2]
+ if !(y.Uses == 1) {
break
}
- y := v_1.Args[0]
- v.reset(OpAMD64LEAQ2)
- v.AuxInt = c
- v.Aux = s
+ v.reset(OpAMD64SETLEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ1 [c] {s} (SHLQconst [1] y) x)
- // cond:
- // result: (LEAQ2 [c] {s} x y)
+ // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
+ // cond: y.Uses == 1
+ // result: (SETGmem [off] {sym} ptr x mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHLQconst {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64SETG {
break
}
- if v_0.AuxInt != 1 {
+ x := y.Args[0]
+ mem := v.Args[2]
+ if !(y.Uses == 1) {
break
}
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpAMD64LEAQ2)
- v.AuxInt = c
- v.Aux = s
+ v.reset(OpAMD64SETGmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
- // cond:
- // result: (LEAQ4 [c] {s} x y)
+ // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETGEmem [off] {sym} ptr x mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHLQconst {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64SETGE {
break
}
- if v_1.AuxInt != 2 {
+ x := y.Args[0]
+ mem := v.Args[2]
+ if !(y.Uses == 1) {
break
}
- y := v_1.Args[0]
- v.reset(OpAMD64LEAQ4)
- v.AuxInt = c
- v.Aux = s
+ v.reset(OpAMD64SETGEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ1 [c] {s} (SHLQconst [2] y) x)
- // cond:
- // result: (LEAQ4 [c] {s} x y)
+ // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
+ // cond: y.Uses == 1
+ // result: (SETEQmem [off] {sym} ptr x mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHLQconst {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64SETEQ {
break
}
- if v_0.AuxInt != 2 {
+ x := y.Args[0]
+ mem := v.Args[2]
+ if !(y.Uses == 1) {
break
}
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpAMD64LEAQ4)
- v.AuxInt = c
- v.Aux = s
+ v.reset(OpAMD64SETEQmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
- // cond:
- // result: (LEAQ8 [c] {s} x y)
+ // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETNEmem [off] {sym} ptr x mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHLQconst {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64SETNE {
break
}
- if v_1.AuxInt != 3 {
+ x := y.Args[0]
+ mem := v.Args[2]
+ if !(y.Uses == 1) {
break
}
- y := v_1.Args[0]
- v.reset(OpAMD64LEAQ8)
- v.AuxInt = c
- v.Aux = s
+ v.reset(OpAMD64SETNEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ1 [c] {s} (SHLQconst [3] y) x)
- // cond:
- // result: (LEAQ8 [c] {s} x y)
+ // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
+ // cond: y.Uses == 1
+ // result: (SETBmem [off] {sym} ptr x mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHLQconst {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64SETB {
break
}
- if v_0.AuxInt != 3 {
+ x := y.Args[0]
+ mem := v.Args[2]
+ if !(y.Uses == 1) {
break
}
- y := v_0.Args[0]
- x := v.Args[1]
- v.reset(OpAMD64LEAQ8)
- v.AuxInt = c
- v.Aux = s
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETBEmem [off] {sym} ptr x mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64SETBE {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- x := v_0.Args[0]
- y := v.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ x := y.Args[0]
+ mem := v.Args[2]
+ if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64LEAQ1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.reset(OpAMD64SETBEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x))
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
+ // cond: y.Uses == 1
+ // result: (SETAmem [off] {sym} ptr x mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[1]
- y := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64LEAQ {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64SETA {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- x := v_1.Args[0]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ x := y.Args[0]
+ mem := v.Args[2]
+ if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64LEAQ1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
- // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
- // cond: is32Bit(c+d) && x.Op != OpSB
- // result: (LEAQ2 [c+d] {s} x y)
- for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- y := v.Args[1]
- if !(is32Bit(c+d) && x.Op != OpSB) {
- break
- }
- v.reset(OpAMD64LEAQ2)
- v.AuxInt = c + d
- v.Aux = s
+ v.reset(OpAMD64SETAmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
- // cond: is32Bit(c+2*d) && y.Op != OpSB
- // result: (LEAQ2 [c+2*d] {s} x y)
+ // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETAEmem [off] {sym} ptr x mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64SETAE {
break
}
- d := v_1.AuxInt
- y := v_1.Args[0]
- if !(is32Bit(c+2*d) && y.Op != OpSB) {
+ x := y.Args[0]
+ mem := v.Args[2]
+ if !(y.Uses == 1) {
break
}
- v.reset(OpAMD64LEAQ2)
- v.AuxInt = c + 2*d
- v.Aux = s
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
// cond:
- // result: (LEAQ4 [c] {s} x y)
+ // result: (MOVBstore [off] {sym} ptr x mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- x := v.Args[0]
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHLQconst {
- break
- }
- if v_1.AuxInt != 1 {
+ if v_1.Op != OpAMD64MOVBQSX {
break
}
- y := v_1.Args[0]
- v.reset(OpAMD64LEAQ4)
- v.AuxInt = c
- v.Aux = s
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
+ // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
// cond:
- // result: (LEAQ8 [c] {s} x y)
+ // result: (MOVBstore [off] {sym} ptr x mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- x := v.Args[0]
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHLQconst {
- break
- }
- if v_1.AuxInt != 2 {
+ if v_1.Op != OpAMD64MOVBQZX {
break
}
- y := v_1.Args[0]
- v.reset(OpAMD64LEAQ8)
- v.AuxInt = c
- v.Aux = s
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[1]
+ sym := v.Aux
+ _ = v.Args[2]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ {
+ if v_0.Op != OpAMD64ADDQconst {
break
}
off2 := v_0.AuxInt
- sym2 := v_0.Aux
- x := v_0.Args[0]
- y := v.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ ptr := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64LEAQ2)
+ v.reset(OpAMD64MOVBstore)
v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
- // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
- // cond: is32Bit(c+d) && x.Op != OpSB
- // result: (LEAQ4 [c+d] {s} x y)
- for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- y := v.Args[1]
- if !(is32Bit(c+d) && x.Op != OpSB) {
- break
- }
- v.reset(OpAMD64LEAQ4)
- v.AuxInt = c + d
- v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
- // cond: is32Bit(c+4*d) && y.Op != OpSB
- // result: (LEAQ4 [c+4*d] {s} x y)
+ // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // cond: validOff(off)
+ // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- x := v.Args[0]
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
+ if v_1.Op != OpAMD64MOVLconst {
break
}
- d := v_1.AuxInt
- y := v_1.Args[0]
- if !(is32Bit(c+4*d) && y.Op != OpSB) {
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(validOff(off)) {
break
}
- v.reset(OpAMD64LEAQ4)
- v.AuxInt = c + 4*d
- v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = makeValAndOff(int64(int8(c)), off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
- // cond:
- // result: (LEAQ8 [c] {s} x y)
+ // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHLQconst {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
break
}
- if v_1.AuxInt != 1 {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- y := v_1.Args[0]
- v.reset(OpAMD64LEAQ8)
- v.AuxInt = c
- v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
- _ = v.Args[1]
+ _ = v.Args[2]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ {
+ if v_0.Op != OpAMD64LEAQ1 {
break
}
off2 := v_0.AuxInt
sym2 := v_0.Aux
- x := v_0.Args[0]
- y := v.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ _ = v_0.Args[1]
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64LEAQ4)
+ v.reset(OpAMD64MOVBstoreidx1)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
- // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
- // cond: is32Bit(c+d) && x.Op != OpSB
- // result: (LEAQ8 [c+d] {s} x y)
+ // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
+ // cond: ptr.Op != OpSB
+ // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
for {
- c := v.AuxInt
- s := v.Aux
- _ = v.Args[1]
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQconst {
+ if v_0.Op != OpAMD64ADDQ {
break
}
- d := v_0.AuxInt
- x := v_0.Args[0]
- y := v.Args[1]
- if !(is32Bit(c+d) && x.Op != OpSB) {
+ _ = v_0.Args[1]
+ ptr := v_0.Args[0]
+ idx := v_0.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(ptr.Op != OpSB) {
break
}
- v.reset(OpAMD64LEAQ8)
- v.AuxInt = c + d
- v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpAMD64MOVBstoreidx1)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
- // match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
- // cond: is32Bit(c+8*d) && y.Op != OpSB
- // result: (LEAQ8 [c+8*d] {s} x y)
+ // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
+ // cond: x0.Uses == 1 && clobber(x0)
+ // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
for {
- c := v.AuxInt
+ i := v.AuxInt
s := v.Aux
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
+ _ = v.Args[2]
+ p := v.Args[0]
+ w := v.Args[1]
+ x0 := v.Args[2]
+ if x0.Op != OpAMD64MOVBstore {
break
}
- d := v_1.AuxInt
- y := v_1.Args[0]
- if !(is32Bit(c+8*d) && y.Op != OpSB) {
+ if x0.AuxInt != i-1 {
break
}
- v.reset(OpAMD64LEAQ8)
- v.AuxInt = c + 8*d
- v.Aux = s
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
- // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ {
+ if x0.Aux != s {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- x := v_0.Args[0]
- y := v.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
break
}
- v.reset(OpAMD64LEAQ8)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
- for {
- x := v.Args[0]
- if x.Op != OpAMD64MOVBload {
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRWconst {
break
}
- off := x.AuxInt
- sym := x.Aux
- _ = x.Args[1]
- ptr := x.Args[0]
- mem := x.Args[1]
- if !(x.Uses == 1 && clobber(x)) {
+ if x0_1.AuxInt != 8 {
break
}
- b = x.Block
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- return true
- }
- // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
- for {
- x := v.Args[0]
- if x.Op != OpAMD64MOVWload {
+ if w != x0_1.Args[0] {
break
}
- off := x.AuxInt
- sym := x.Aux
- _ = x.Args[1]
- ptr := x.Args[0]
- mem := x.Args[1]
- if !(x.Uses == 1 && clobber(x)) {
+ mem := x0.Args[2]
+ if !(x0.Uses == 1 && clobber(x0)) {
break
}
- b = x.Block
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
- v.reset(OpCopy)
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = i - 1
+ v.Aux = s
+ v.AddArg(p)
+ v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
+ v0.AuxInt = 8
+ v0.AddArg(w)
v.AddArg(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v.AddArg(mem)
return true
}
- // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
+ // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
for {
- x := v.Args[0]
- if x.Op != OpAMD64MOVLload {
+ i := v.AuxInt
+ s := v.Aux
+ _ = v.Args[2]
+ p := v.Args[0]
+ w := v.Args[1]
+ x2 := v.Args[2]
+ if x2.Op != OpAMD64MOVBstore {
break
}
- off := x.AuxInt
- sym := x.Aux
- _ = x.Args[1]
- ptr := x.Args[0]
- mem := x.Args[1]
- if !(x.Uses == 1 && clobber(x)) {
+ if x2.AuxInt != i-1 {
break
}
- b = x.Block
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- return true
- }
- // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
- for {
- x := v.Args[0]
- if x.Op != OpAMD64MOVQload {
+ if x2.Aux != s {
break
}
- off := x.AuxInt
- sym := x.Aux
- _ = x.Args[1]
- ptr := x.Args[0]
- mem := x.Args[1]
- if !(x.Uses == 1 && clobber(x)) {
+ _ = x2.Args[2]
+ if p != x2.Args[0] {
break
}
- b = x.Block
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBQSXload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- return true
- }
- // match: (MOVBQSX (ANDLconst [c] x))
- // cond: c & 0x80 == 0
- // result: (ANDLconst [c & 0x7f] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDLconst {
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpAMD64SHRLconst {
break
}
- c := v_0.AuxInt
- x := v_0.Args[0]
- if !(c&0x80 == 0) {
+ if x2_1.AuxInt != 8 {
break
}
- v.reset(OpAMD64ANDLconst)
- v.AuxInt = c & 0x7f
- v.AddArg(x)
- return true
- }
- // match: (MOVBQSX (MOVBQSX x))
- // cond:
- // result: (MOVBQSX x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBQSX {
- break
- }
- x := v_0.Args[0]
- v.reset(OpAMD64MOVBQSX)
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
- // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
- // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
- // result: (MOVBQSX x)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[1]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBstore {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
- x := v_1.Args[1]
- if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
- break
- }
- v.reset(OpAMD64MOVBQSX)
- v.AddArg(x)
- return true
- }
- // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- mem := v.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpAMD64MOVBQSXload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
- for {
- x := v.Args[0]
- if x.Op != OpAMD64MOVBload {
- break
- }
- off := x.AuxInt
- sym := x.Aux
- _ = x.Args[1]
- ptr := x.Args[0]
- mem := x.Args[1]
- if !(x.Uses == 1 && clobber(x)) {
- break
- }
- b = x.Block
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- return true
- }
- // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
- for {
- x := v.Args[0]
- if x.Op != OpAMD64MOVWload {
- break
- }
- off := x.AuxInt
- sym := x.Aux
- _ = x.Args[1]
- ptr := x.Args[0]
- mem := x.Args[1]
- if !(x.Uses == 1 && clobber(x)) {
- break
- }
- b = x.Block
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- return true
- }
- // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
- for {
- x := v.Args[0]
- if x.Op != OpAMD64MOVLload {
- break
- }
- off := x.AuxInt
- sym := x.Aux
- _ = x.Args[1]
- ptr := x.Args[0]
- mem := x.Args[1]
- if !(x.Uses == 1 && clobber(x)) {
- break
- }
- b = x.Block
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- return true
- }
- // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
- for {
- x := v.Args[0]
- if x.Op != OpAMD64MOVQload {
- break
- }
- off := x.AuxInt
- sym := x.Aux
- _ = x.Args[1]
- ptr := x.Args[0]
- mem := x.Args[1]
- if !(x.Uses == 1 && clobber(x)) {
- break
- }
- b = x.Block
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- return true
- }
- // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
- // cond: x.Uses == 1 && clobber(x)
- // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
- for {
- x := v.Args[0]
- if x.Op != OpAMD64MOVBloadidx1 {
- break
- }
- off := x.AuxInt
- sym := x.Aux
- _ = x.Args[2]
- ptr := x.Args[0]
- idx := x.Args[1]
- mem := x.Args[2]
- if !(x.Uses == 1 && clobber(x)) {
- break
- }
- b = x.Block
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = off
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(idx)
- v0.AddArg(mem)
- return true
- }
- // match: (MOVBQZX (ANDLconst [c] x))
- // cond:
- // result: (ANDLconst [c & 0xff] x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ANDLconst {
- break
- }
- c := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpAMD64ANDLconst)
- v.AuxInt = c & 0xff
- v.AddArg(x)
- return true
- }
- // match: (MOVBQZX (MOVBQZX x))
- // cond:
- // result: (MOVBQZX x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVBQZX {
- break
- }
- x := v_0.Args[0]
- v.reset(OpAMD64MOVBQZX)
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
- // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
- // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
- // result: (MOVBQZX x)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[1]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBstore {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- _ = v_1.Args[2]
- ptr2 := v_1.Args[0]
- x := v_1.Args[1]
- if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
- break
- }
- v.reset(OpAMD64MOVBQZX)
- v.AddArg(x)
- return true
- }
- // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVBload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(OpAMD64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- mem := v.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpAMD64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ1 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- _ = v_0.Args[1]
- ptr := v_0.Args[0]
- idx := v_0.Args[1]
- mem := v.Args[1]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpAMD64MOVBloadidx1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
- // cond: ptr.Op != OpSB
- // result: (MOVBloadidx1 [off] {sym} ptr idx mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQ {
- break
- }
- _ = v_0.Args[1]
- ptr := v_0.Args[0]
- idx := v_0.Args[1]
- mem := v.Args[1]
- if !(ptr.Op != OpSB) {
- break
- }
- v.reset(OpAMD64MOVBloadidx1)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
- // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
- // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAL {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
- break
- }
- v.reset(OpAMD64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVBload [off1+off2] {sym} ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDLconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- mem := v.Args[1]
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(OpAMD64MOVBload)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
- // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
- // cond: is32Bit(c+d)
- // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQconst {
- break
- }
- d := v_0.AuxInt
- ptr := v_0.Args[0]
- idx := v.Args[1]
- mem := v.Args[2]
- if !(is32Bit(c + d)) {
- break
- }
- v.reset(OpAMD64MOVBloadidx1)
- v.AuxInt = c + d
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
- // cond: is32Bit(c+d)
- // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- idx := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
- break
- }
- d := v_1.AuxInt
- ptr := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(c + d)) {
- break
- }
- v.reset(OpAMD64MOVBloadidx1)
- v.AuxInt = c + d
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
- // cond: is32Bit(c+d)
- // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
- break
- }
- d := v_1.AuxInt
- idx := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(c + d)) {
- break
- }
- v.reset(OpAMD64MOVBloadidx1)
- v.AuxInt = c + d
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
- // cond: is32Bit(c+d)
- // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
- for {
- c := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQconst {
- break
- }
- d := v_0.AuxInt
- idx := v_0.Args[0]
- ptr := v.Args[1]
- mem := v.Args[2]
- if !(is32Bit(c + d)) {
- break
- }
- v.reset(OpAMD64MOVBloadidx1)
- v.AuxInt = c + d
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool {
- // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
- // cond: y.Uses == 1
- // result: (SETLmem [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- y := v.Args[1]
- if y.Op != OpAMD64SETL {
- break
- }
- x := y.Args[0]
- mem := v.Args[2]
- if !(y.Uses == 1) {
- break
- }
- v.reset(OpAMD64SETLmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
- // cond: y.Uses == 1
- // result: (SETLEmem [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- y := v.Args[1]
- if y.Op != OpAMD64SETLE {
- break
- }
- x := y.Args[0]
- mem := v.Args[2]
- if !(y.Uses == 1) {
- break
- }
- v.reset(OpAMD64SETLEmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
- // cond: y.Uses == 1
- // result: (SETGmem [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- y := v.Args[1]
- if y.Op != OpAMD64SETG {
- break
- }
- x := y.Args[0]
- mem := v.Args[2]
- if !(y.Uses == 1) {
- break
- }
- v.reset(OpAMD64SETGmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
- // cond: y.Uses == 1
- // result: (SETGEmem [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- y := v.Args[1]
- if y.Op != OpAMD64SETGE {
- break
- }
- x := y.Args[0]
- mem := v.Args[2]
- if !(y.Uses == 1) {
- break
- }
- v.reset(OpAMD64SETGEmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
- // cond: y.Uses == 1
- // result: (SETEQmem [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- y := v.Args[1]
- if y.Op != OpAMD64SETEQ {
- break
- }
- x := y.Args[0]
- mem := v.Args[2]
- if !(y.Uses == 1) {
- break
- }
- v.reset(OpAMD64SETEQmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
- // cond: y.Uses == 1
- // result: (SETNEmem [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- y := v.Args[1]
- if y.Op != OpAMD64SETNE {
- break
- }
- x := y.Args[0]
- mem := v.Args[2]
- if !(y.Uses == 1) {
- break
- }
- v.reset(OpAMD64SETNEmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
- // cond: y.Uses == 1
- // result: (SETBmem [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- y := v.Args[1]
- if y.Op != OpAMD64SETB {
- break
- }
- x := y.Args[0]
- mem := v.Args[2]
- if !(y.Uses == 1) {
- break
- }
- v.reset(OpAMD64SETBmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
- // cond: y.Uses == 1
- // result: (SETBEmem [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- y := v.Args[1]
- if y.Op != OpAMD64SETBE {
- break
- }
- x := y.Args[0]
- mem := v.Args[2]
- if !(y.Uses == 1) {
- break
- }
- v.reset(OpAMD64SETBEmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
- // cond: y.Uses == 1
- // result: (SETAmem [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- y := v.Args[1]
- if y.Op != OpAMD64SETA {
- break
- }
- x := y.Args[0]
- mem := v.Args[2]
- if !(y.Uses == 1) {
- break
- }
- v.reset(OpAMD64SETAmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
- // cond: y.Uses == 1
- // result: (SETAEmem [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- y := v.Args[1]
- if y.Op != OpAMD64SETAE {
- break
- }
- x := y.Args[0]
- mem := v.Args[2]
- if !(y.Uses == 1) {
- break
- }
- v.reset(OpAMD64SETAEmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
- b := v.Block
- _ = b
- // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
- // cond:
- // result: (MOVBstore [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBQSX {
- break
- }
- x := v_1.Args[0]
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
- // cond:
- // result: (MOVBstore [off] {sym} ptr x mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVBQZX {
- break
- }
- x := v_1.Args[0]
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
- // result: (MOVBstore [off1+off2] {sym} ptr val mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQconst {
- break
- }
- off2 := v_0.AuxInt
- ptr := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
- // cond: validOff(off)
- // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(validOff(off)) {
- break
- }
- v.reset(OpAMD64MOVBstoreconst)
- v.AuxInt = makeValAndOff(int64(int8(c)), off)
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[2]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[2]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ1 {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- _ = v_0.Args[1]
- ptr := v_0.Args[0]
- idx := v_0.Args[1]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpAMD64MOVBstoreidx1)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
- // cond: ptr.Op != OpSB
- // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQ {
- break
- }
- _ = v_0.Args[1]
- ptr := v_0.Args[0]
- idx := v_0.Args[1]
- val := v.Args[1]
- mem := v.Args[2]
- if !(ptr.Op != OpSB) {
- break
- }
- v.reset(OpAMD64MOVBstoreidx1)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(idx)
- v.AddArg(val)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
- // cond: x0.Uses == 1 && clobber(x0)
- // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
- for {
- i := v.AuxInt
- s := v.Aux
- _ = v.Args[2]
- p := v.Args[0]
- w := v.Args[1]
- x0 := v.Args[2]
- if x0.Op != OpAMD64MOVBstore {
- break
- }
- if x0.AuxInt != i-1 {
- break
- }
- if x0.Aux != s {
- break
- }
- _ = x0.Args[2]
- if p != x0.Args[0] {
- break
- }
- x0_1 := x0.Args[1]
- if x0_1.Op != OpAMD64SHRWconst {
- break
- }
- if x0_1.AuxInt != 8 {
- break
- }
- if w != x0_1.Args[0] {
- break
- }
- mem := x0.Args[2]
- if !(x0.Uses == 1 && clobber(x0)) {
- break
- }
- v.reset(OpAMD64MOVWstore)
- v.AuxInt = i - 1
- v.Aux = s
- v.AddArg(p)
- v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
- v0.AuxInt = 8
- v0.AddArg(w)
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
- // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
- // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
- for {
- i := v.AuxInt
- s := v.Aux
- _ = v.Args[2]
- p := v.Args[0]
- w := v.Args[1]
- x2 := v.Args[2]
- if x2.Op != OpAMD64MOVBstore {
- break
- }
- if x2.AuxInt != i-1 {
- break
- }
- if x2.Aux != s {
- break
- }
- _ = x2.Args[2]
- if p != x2.Args[0] {
- break
- }
- x2_1 := x2.Args[1]
- if x2_1.Op != OpAMD64SHRLconst {
- break
- }
- if x2_1.AuxInt != 8 {
- break
- }
- if w != x2_1.Args[0] {
+ if w != x2_1.Args[0] {
break
}
x1 := x2.Args[2]
if v_1_0.Op != OpAMD64ANDLconst {
break
}
- c := v_1_0.AuxInt
- y := v_1_0.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SARQ)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool {
- // match: (SARQconst x [0])
- // cond:
- // result: x
- for {
- if v.AuxInt != 0 {
- break
- }
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (SARQconst [c] (MOVQconst [d]))
- // cond:
- // result: (MOVQconst [d>>uint64(c)])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = d >> uint64(c)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool {
- // match: (SARW x (MOVQconst [c]))
- // cond:
- // result: (SARWconst [min(c&31,15)] x)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SARWconst)
- v.AuxInt = min(c&31, 15)
- v.AddArg(x)
- return true
- }
- // match: (SARW x (MOVLconst [c]))
- // cond:
- // result: (SARWconst [min(c&31,15)] x)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64SARWconst)
- v.AuxInt = min(c&31, 15)
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool {
- // match: (SARWconst x [0])
- // cond:
- // result: x
- for {
- if v.AuxInt != 0 {
- break
- }
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (SARWconst [c] (MOVQconst [d]))
- // cond:
- // result: (MOVQconst [int64(int16(d))>>uint64(c)])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = int64(int16(d)) >> uint64(c)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool {
- // match: (SBBLcarrymask (FlagEQ))
- // cond:
- // result: (MOVLconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagEQ {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
- return true
- }
- // match: (SBBLcarrymask (FlagLT_ULT))
- // cond:
- // result: (MOVLconst [-1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagLT_ULT {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = -1
- return true
- }
- // match: (SBBLcarrymask (FlagLT_UGT))
- // cond:
- // result: (MOVLconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagLT_UGT {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
- return true
- }
- // match: (SBBLcarrymask (FlagGT_ULT))
- // cond:
- // result: (MOVLconst [-1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagGT_ULT {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = -1
- return true
- }
- // match: (SBBLcarrymask (FlagGT_UGT))
- // cond:
- // result: (MOVLconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagGT_UGT {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool {
- // match: (SBBQcarrymask (FlagEQ))
- // cond:
- // result: (MOVQconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagEQ {
- break
- }
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
- return true
- }
- // match: (SBBQcarrymask (FlagLT_ULT))
- // cond:
- // result: (MOVQconst [-1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagLT_ULT {
- break
- }
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = -1
- return true
- }
- // match: (SBBQcarrymask (FlagLT_UGT))
- // cond:
- // result: (MOVQconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagLT_UGT {
- break
- }
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
- return true
- }
- // match: (SBBQcarrymask (FlagGT_ULT))
- // cond:
- // result: (MOVQconst [-1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagGT_ULT {
- break
- }
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = -1
- return true
- }
- // match: (SBBQcarrymask (FlagGT_UGT))
- // cond:
- // result: (MOVQconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagGT_UGT {
- break
- }
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool {
- // match: (SETA (InvertFlags x))
- // cond:
- // result: (SETB x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64InvertFlags {
- break
- }
- x := v_0.Args[0]
- v.reset(OpAMD64SETB)
- v.AddArg(x)
- return true
- }
- // match: (SETA (FlagEQ))
- // cond:
- // result: (MOVLconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagEQ {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
- return true
- }
- // match: (SETA (FlagLT_ULT))
- // cond:
- // result: (MOVLconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagLT_ULT {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
- return true
- }
- // match: (SETA (FlagLT_UGT))
- // cond:
- // result: (MOVLconst [1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagLT_UGT {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
- return true
- }
- // match: (SETA (FlagGT_ULT))
- // cond:
- // result: (MOVLconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagGT_ULT {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
- return true
- }
- // match: (SETA (FlagGT_UGT))
- // cond:
- // result: (MOVLconst [1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagGT_UGT {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool {
- // match: (SETAE (InvertFlags x))
- // cond:
- // result: (SETBE x)
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64InvertFlags {
- break
- }
- x := v_0.Args[0]
- v.reset(OpAMD64SETBE)
- v.AddArg(x)
- return true
- }
- // match: (SETAE (FlagEQ))
- // cond:
- // result: (MOVLconst [1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagEQ {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
- return true
- }
- // match: (SETAE (FlagLT_ULT))
- // cond:
- // result: (MOVLconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagLT_ULT {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
- return true
- }
- // match: (SETAE (FlagLT_UGT))
- // cond:
- // result: (MOVLconst [1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagLT_UGT {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
- return true
- }
- // match: (SETAE (FlagGT_ULT))
- // cond:
- // result: (MOVLconst [0])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagGT_ULT {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
- return true
- }
- // match: (SETAE (FlagGT_UGT))
- // cond:
- // result: (MOVLconst [1])
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagGT_UGT {
+ c := v_1_0.AuxInt
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
break
}
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.reset(OpAMD64SARQ)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SETAEmem_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (SETAEmem [off] {sym} ptr (InvertFlags x) mem)
+func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool {
+ // match: (SARQconst x [0])
// cond:
- // result: (SETBEmem [off] {sym} ptr x mem)
+ // result: x
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64InvertFlags {
+ if v.AuxInt != 0 {
break
}
- x := v_1.Args[0]
- mem := v.Args[2]
- v.reset(OpAMD64SETBEmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
- v.AddArg(mem)
return true
}
- // match: (SETAEmem [off1] {sym} (ADDQconst [off2] base) val mem)
- // cond: is32Bit(off1+off2)
- // result: (SETAEmem [off1+off2] {sym} base val mem)
+ // match: (SARQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [d>>uint64(c)])
for {
- off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
+ c := v.AuxInt
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQconst {
- break
- }
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is32Bit(off1 + off2)) {
+ if v_0.Op != OpAMD64MOVQconst {
break
}
- v.reset(OpAMD64SETAEmem)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = d >> uint64(c)
return true
}
- // match: (SETAEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETAEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool {
+ // match: (SARW x (MOVQconst [c]))
+ // cond:
+ // result: (SARWconst [min(c&31,15)] x)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[2]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
break
}
- v.reset(OpAMD64SETAEmem)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ c := v_1.AuxInt
+ v.reset(OpAMD64SARWconst)
+ v.AuxInt = min(c&31, 15)
+ v.AddArg(x)
return true
}
- // match: (SETAEmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SARW x (MOVLconst [c]))
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (SARWconst [min(c&31,15)] x)
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagEQ {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
break
}
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ c := v_1.AuxInt
+ v.reset(OpAMD64SARWconst)
+ v.AuxInt = min(c&31, 15)
+ v.AddArg(x)
return true
}
- // match: (SETAEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool {
+ // match: (SARWconst x [0])
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: x
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagLT_ULT {
+ if v.AuxInt != 0 {
break
}
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
return true
}
- // match: (SETAEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SARWconst [c] (MOVQconst [d]))
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVQconst [int64(int16(d))>>uint64(c)])
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagLT_UGT {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
break
}
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64(int16(d)) >> uint64(c)
return true
}
- // match: (SETAEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool {
+ // match: (SBBLcarrymask (FlagEQ))
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVLconst [0])
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagGT_ULT {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagEQ {
break
}
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
return true
}
- // match: (SETAEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SBBLcarrymask (FlagLT_ULT))
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVLconst [-1])
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagGT_UGT {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = -1
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SETAmem_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (SETAmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SBBLcarrymask (FlagLT_UGT))
// cond:
- // result: (SETBmem [off] {sym} ptr x mem)
+ // result: (MOVLconst [0])
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64InvertFlags {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- x := v_1.Args[0]
- mem := v.Args[2]
- v.reset(OpAMD64SETBmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v.AddArg(x)
- v.AddArg(mem)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
return true
}
- // match: (SETAmem [off1] {sym} (ADDQconst [off2] base) val mem)
- // cond: is32Bit(off1+off2)
- // result: (SETAmem [off1+off2] {sym} base val mem)
+ // match: (SBBLcarrymask (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [-1])
for {
- off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQconst {
- break
- }
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is32Bit(off1 + off2)) {
+ if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64SETAmem)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = -1
return true
}
- // match: (SETAmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETAmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // match: (SBBLcarrymask (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[2]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ {
- break
- }
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64SETAmem)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
- v.AddArg(mem)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
return true
}
- // match: (SETAmem [off] {sym} ptr x:(FlagEQ) mem)
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool {
+ // match: (SBBQcarrymask (FlagEQ))
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVQconst [0])
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagEQ {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagEQ {
break
}
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = 0
return true
}
- // match: (SETAmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SBBQcarrymask (FlagLT_ULT))
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVQconst [-1])
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagLT_ULT {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = -1
return true
}
- // match: (SETAmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SBBQcarrymask (FlagLT_UGT))
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVQconst [0])
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagLT_UGT {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = 0
return true
}
- // match: (SETAmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SBBQcarrymask (FlagGT_ULT))
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVQconst [-1])
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagGT_ULT {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = -1
return true
}
- // match: (SETAmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SBBQcarrymask (FlagGT_UGT))
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVQconst [0])
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagGT_UGT {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = 0
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool {
- // match: (SETB (InvertFlags x))
+func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool {
+ // match: (SETA (InvertFlags x))
// cond:
- // result: (SETA x)
+ // result: (SETB x)
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64InvertFlags {
break
}
x := v_0.Args[0]
- v.reset(OpAMD64SETA)
+ v.reset(OpAMD64SETB)
v.AddArg(x)
return true
}
- // match: (SETB (FlagEQ))
+ // match: (SETA (FlagEQ))
// cond:
// result: (MOVLconst [0])
for {
v.AuxInt = 0
return true
}
- // match: (SETB (FlagLT_ULT))
+ // match: (SETA (FlagLT_ULT))
// cond:
- // result: (MOVLconst [1])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.AuxInt = 0
return true
}
- // match: (SETB (FlagLT_UGT))
+ // match: (SETA (FlagLT_UGT))
// cond:
- // result: (MOVLconst [0])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = 1
return true
}
- // match: (SETB (FlagGT_ULT))
+ // match: (SETA (FlagGT_ULT))
// cond:
- // result: (MOVLconst [1])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.AuxInt = 0
return true
}
- // match: (SETB (FlagGT_UGT))
+ // match: (SETA (FlagGT_UGT))
// cond:
- // result: (MOVLconst [0])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = 1
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool {
- // match: (SETBE (InvertFlags x))
+func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool {
+ // match: (SETAE (InvertFlags x))
// cond:
- // result: (SETAE x)
+ // result: (SETBE x)
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64InvertFlags {
break
}
x := v_0.Args[0]
- v.reset(OpAMD64SETAE)
+ v.reset(OpAMD64SETBE)
v.AddArg(x)
return true
}
- // match: (SETBE (FlagEQ))
+ // match: (SETAE (FlagEQ))
// cond:
// result: (MOVLconst [1])
for {
v.AuxInt = 1
return true
}
- // match: (SETBE (FlagLT_ULT))
+ // match: (SETAE (FlagLT_ULT))
// cond:
- // result: (MOVLconst [1])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.AuxInt = 0
return true
}
- // match: (SETBE (FlagLT_UGT))
+ // match: (SETAE (FlagLT_UGT))
// cond:
- // result: (MOVLconst [0])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = 1
return true
}
- // match: (SETBE (FlagGT_ULT))
+ // match: (SETAE (FlagGT_ULT))
// cond:
- // result: (MOVLconst [1])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.AuxInt = 0
return true
}
- // match: (SETBE (FlagGT_UGT))
+ // match: (SETAE (FlagGT_UGT))
// cond:
- // result: (MOVLconst [0])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = 1
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SETBEmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETAEmem_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETBEmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETAEmem [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETAEmem [off] {sym} ptr x mem)
+ // result: (SETBEmem [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64SETBEmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETAEmem [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETBEmem [off1+off2] {sym} base val mem)
+ // result: (SETAEmem [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETBEmem)
+ v.reset(OpAMD64SETAEmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETAEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETBEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETAEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETBEmem)
+ v.reset(OpAMD64SETAEmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETAEmem [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETAEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETAEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETAEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETBEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETAEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SETBmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETAmem_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETBmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETAmem [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETAmem [off] {sym} ptr x mem)
+ // result: (SETBmem [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETAmem)
+ v.reset(OpAMD64SETBmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETBmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETAmem [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETBmem [off1+off2] {sym} base val mem)
+ // result: (SETAmem [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETAmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETBmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETAmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETBmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETAmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETAmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETBmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETAmem [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETBmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETAmem [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETBmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETAmem [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETBmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETAmem [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETBmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETAmem [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
- b := v.Block
- _ = b
- config := b.Func.Config
- _ = config
- // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
- // cond: !config.nacl
- // result: (SETAE (BTL x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64SHLL {
- break
- }
- _ = v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVLconst {
- break
- }
- if v_0_0_0.AuxInt != 1 {
- break
- }
- x := v_0_0.Args[1]
- y := v_0.Args[1]
- if !(!config.nacl) {
- break
- }
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x)))
- // cond: !config.nacl
- // result: (SETAE (BTL x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTL {
- break
- }
- _ = v_0.Args[1]
- y := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64SHLL {
- break
- }
- _ = v_0_1.Args[1]
- v_0_1_0 := v_0_1.Args[0]
- if v_0_1_0.Op != OpAMD64MOVLconst {
- break
- }
- if v_0_1_0.AuxInt != 1 {
- break
- }
- x := v_0_1.Args[1]
- if !(!config.nacl) {
- break
- }
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
- // cond: !config.nacl
- // result: (SETAE (BTQ x y))
+func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool {
+ // match: (SETB (InvertFlags x))
+ // cond:
+ // result: (SETA x)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTQ {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64SHLQ {
- break
- }
- _ = v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVQconst {
- break
- }
- if v_0_0_0.AuxInt != 1 {
- break
- }
- x := v_0_0.Args[1]
- y := v_0.Args[1]
- if !(!config.nacl) {
+ if v_0.Op != OpAMD64InvertFlags {
break
}
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETA)
+ v.AddArg(x)
return true
}
- // match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
- // cond: !config.nacl
- // result: (SETAE (BTQ x y))
+ // match: (SETB (FlagEQ))
+ // cond:
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTQ {
- break
- }
- _ = v_0.Args[1]
- y := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64SHLQ {
- break
- }
- _ = v_0_1.Args[1]
- v_0_1_0 := v_0_1.Args[0]
- if v_0_1_0.Op != OpAMD64MOVQconst {
- break
- }
- if v_0_1_0.AuxInt != 1 {
- break
- }
- x := v_0_1.Args[1]
- if !(!config.nacl) {
+ if v_0.Op != OpAMD64FlagEQ {
break
}
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
return true
}
- // match: (SETEQ (TESTLconst [c] x))
- // cond: isUint32PowerOfTwo(c) && !config.nacl
- // result: (SETAE (BTLconst [log2uint32(c)] x))
+ // match: (SETB (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTLconst {
- break
- }
- c := v_0.AuxInt
- x := v_0.Args[0]
- if !(isUint32PowerOfTwo(c) && !config.nacl) {
+ if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = log2uint32(c)
- v0.AddArg(x)
- v.AddArg(v0)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 1
return true
}
- // match: (SETEQ (TESTQconst [c] x))
- // cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETAE (BTQconst [log2(c)] x))
+ // match: (SETB (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTQconst {
- break
- }
- c := v_0.AuxInt
- x := v_0.Args[0]
- if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = log2(c)
- v0.AddArg(x)
- v.AddArg(v0)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
return true
}
- // match: (SETEQ (TESTQ (MOVQconst [c]) x))
- // cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETAE (BTQconst [log2(c)] x))
+ // match: (SETB (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTQ {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64MOVQconst {
- break
- }
- c := v_0_0.AuxInt
- x := v_0.Args[1]
- if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = log2(c)
- v0.AddArg(x)
- v.AddArg(v0)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 1
return true
}
- // match: (SETEQ (TESTQ x (MOVQconst [c])))
- // cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETAE (BTQconst [log2(c)] x))
+ // match: (SETB (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTQ {
- break
- }
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64MOVQconst {
- break
- }
- c := v_0_1.AuxInt
- if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = log2(c)
- v0.AddArg(x)
- v.AddArg(v0)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
return true
}
- // match: (SETEQ (InvertFlags x))
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool {
+ // match: (SETBE (InvertFlags x))
// cond:
- // result: (SETEQ x)
+ // result: (SETAE x)
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64InvertFlags {
break
}
x := v_0.Args[0]
- v.reset(OpAMD64SETEQ)
+ v.reset(OpAMD64SETAE)
v.AddArg(x)
return true
}
- // match: (SETEQ (FlagEQ))
+ // match: (SETBE (FlagEQ))
// cond:
// result: (MOVLconst [1])
for {
v.AuxInt = 1
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
- // match: (SETEQ (FlagLT_ULT))
+ // match: (SETBE (FlagLT_ULT))
// cond:
- // result: (MOVLconst [0])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = 1
return true
}
- // match: (SETEQ (FlagLT_UGT))
+ // match: (SETBE (FlagLT_UGT))
// cond:
// result: (MOVLconst [0])
for {
v.AuxInt = 0
return true
}
- // match: (SETEQ (FlagGT_ULT))
+ // match: (SETBE (FlagGT_ULT))
// cond:
- // result: (MOVLconst [0])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = 1
return true
}
- // match: (SETEQ (FlagGT_UGT))
+ // match: (SETBE (FlagGT_UGT))
// cond:
// result: (MOVLconst [0])
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64SETEQmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETBEmem_0(v *Value) bool {
b := v.Block
_ = b
- config := b.Func.Config
- _ = config
- // match: (SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
- // cond: !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
+ // match: (SETBEmem [off] {sym} ptr (InvertFlags x) mem)
+ // cond:
+ // result: (SETAEmem [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTL {
- break
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64SHLL {
- break
- }
- _ = v_1_0.Args[1]
- v_1_0_0 := v_1_0.Args[0]
- if v_1_0_0.Op != OpAMD64MOVLconst {
- break
- }
- if v_1_0_0.AuxInt != 1 {
+ if v_1.Op != OpAMD64InvertFlags {
break
}
- x := v_1_0.Args[1]
- y := v_1.Args[1]
+ x := v_1.Args[0]
mem := v.Args[2]
- if !(!config.nacl) {
- break
- }
v.reset(OpAMD64SETAEmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg(x)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
- // cond: !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
+ // match: (SETBEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (SETBEmem [off1+off2] {sym} base val mem)
for {
- off := v.AuxInt
+ off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTL {
- break
- }
- _ = v_1.Args[1]
- y := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64SHLL {
- break
- }
- _ = v_1_1.Args[1]
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64MOVLconst {
- break
- }
- if v_1_1_0.AuxInt != 1 {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
break
}
- x := v_1_1.Args[1]
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
mem := v.Args[2]
- if !(!config.nacl) {
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETAEmem)
- v.AuxInt = off
+ v.reset(OpAMD64SETBEmem)
+ v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg(base)
+ v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
- // cond: !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
+ // match: (SETBEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (SETBEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off1 := v.AuxInt
+ sym1 := v.Aux
_ = v.Args[2]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTQ {
- break
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64SHLQ {
- break
- }
- _ = v_1_0.Args[1]
- v_1_0_0 := v_1_0.Args[0]
- if v_1_0_0.Op != OpAMD64MOVQconst {
- break
- }
- if v_1_0_0.AuxInt != 1 {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
break
}
- x := v_1_0.Args[1]
- y := v_1.Args[1]
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
mem := v.Args[2]
- if !(!config.nacl) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETAEmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v.reset(OpAMD64SETBEmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
- // cond: !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
+ // match: (SETBEmem [off] {sym} ptr x:(FlagEQ) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTQ {
- break
- }
- _ = v_1.Args[1]
- y := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64SHLQ {
- break
- }
- _ = v_1_1.Args[1]
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64MOVQconst {
- break
- }
- if v_1_1_0.AuxInt != 1 {
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagEQ {
break
}
- x := v_1_1.Args[1]
mem := v.Args[2]
- if !(!config.nacl) {
- break
- }
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem)
- // cond: isUint32PowerOfTwo(c) && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
+ // match: (SETBEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTLconst {
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagLT_ULT {
break
}
- c := v_1.AuxInt
- x := v_1.Args[0]
mem := v.Args[2]
- if !(isUint32PowerOfTwo(c) && !config.nacl) {
- break
- }
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = log2uint32(c)
- v0.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem)
- // cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+ // match: (SETBEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTQconst {
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagLT_UGT {
break
}
- c := v_1.AuxInt
- x := v_1.Args[0]
mem := v.Args[2]
- if !(isUint64PowerOfTwo(c) && !config.nacl) {
- break
- }
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = log2(c)
- v0.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
- // cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+ // match: (SETBEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTQ {
- break
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64MOVQconst {
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagGT_ULT {
break
}
- c := v_1_0.AuxInt
- x := v_1.Args[1]
mem := v.Args[2]
- if !(isUint64PowerOfTwo(c) && !config.nacl) {
- break
- }
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = log2(c)
- v0.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
- // cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+ // match: (SETBEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTQ {
- break
- }
- _ = v_1.Args[1]
- x := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64MOVQconst {
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagGT_UGT {
break
}
- c := v_1_1.AuxInt
mem := v.Args[2]
- if !(isUint64PowerOfTwo(c) && !config.nacl) {
- break
- }
- v.reset(OpAMD64SETAEmem)
+ v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = log2(c)
- v0.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr (InvertFlags x) mem)
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETBmem_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (SETBmem [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETEQmem [off] {sym} ptr x mem)
+ // result: (SETAmem [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETEQmem)
+ v.reset(OpAMD64SETAmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETBmem [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETEQmem [off1+off2] {sym} base val mem)
+ // result: (SETBmem [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETEQmem)
+ v.reset(OpAMD64SETBmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SETEQmem_10(v *Value) bool {
- b := v.Block
- _ = b
- // match: (SETEQmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETBmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETEQmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETBmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETEQmem)
+ v.reset(OpAMD64SETBmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETBmem [off] {sym} ptr x:(FlagEQ) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETBmem [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETBmem [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETBmem [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETEQmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETBmem [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool {
- // match: (SETG (InvertFlags x))
- // cond:
- // result: (SETL x)
+func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
+ // cond: !config.nacl
+ // result: (SETAE (BTL x y))
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64InvertFlags {
+ if v_0.Op != OpAMD64TESTL {
break
}
- x := v_0.Args[0]
- v.reset(OpAMD64SETL)
- v.AddArg(x)
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLL {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ if v_0_0_0.AuxInt != 1 {
+ break
+ }
+ x := v_0_0.Args[1]
+ y := v_0.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (SETG (FlagEQ))
- // cond:
- // result: (MOVLconst [0])
+ // match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x)))
+ // cond: !config.nacl
+ // result: (SETAE (BTL x y))
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagEQ {
+ if v_0.Op != OpAMD64TESTL {
break
}
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ _ = v_0.Args[1]
+ y := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SHLL {
+ break
+ }
+ _ = v_0_1.Args[1]
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ if v_0_1_0.AuxInt != 1 {
+ break
+ }
+ x := v_0_1.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (SETG (FlagLT_ULT))
- // cond:
- // result: (MOVLconst [0])
+ // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // cond: !config.nacl
+ // result: (SETAE (BTQ x y))
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagLT_ULT {
+ if v_0.Op != OpAMD64TESTQ {
break
}
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLQ {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ if v_0_0_0.AuxInt != 1 {
+ break
+ }
+ x := v_0_0.Args[1]
+ y := v_0.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (SETG (FlagLT_UGT))
- // cond:
- // result: (MOVLconst [0])
+ // match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
+ // cond: !config.nacl
+ // result: (SETAE (BTQ x y))
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagLT_UGT {
+ if v_0.Op != OpAMD64TESTQ {
break
}
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ _ = v_0.Args[1]
+ y := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SHLQ {
+ break
+ }
+ _ = v_0_1.Args[1]
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ if v_0_1_0.AuxInt != 1 {
+ break
+ }
+ x := v_0_1.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (SETG (FlagGT_ULT))
- // cond:
- // result: (MOVLconst [1])
+ // match: (SETEQ (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(c) && !config.nacl
+ // result: (SETAE (BTLconst [log2uint32(c)] x))
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagGT_ULT {
+ if v_0.Op != OpAMD64TESTLconst {
break
}
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(c) && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = log2uint32(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
- // match: (SETG (FlagGT_UGT))
- // cond:
- // result: (MOVLconst [1])
+ // match: (SETEQ (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(c) && !config.nacl
+ // result: (SETAE (BTQconst [log2(c)] x))
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64FlagGT_UGT {
+ if v_0.Op != OpAMD64TESTQconst {
break
}
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool {
- // match: (SETGE (InvertFlags x))
+ // match: (SETEQ (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c) && !config.nacl
+ // result: (SETAE (BTQconst [log2(c)] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_0_0.AuxInt
+ x := v_0.Args[1]
+ if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQ x (MOVQconst [c])))
+ // cond: isUint64PowerOfTwo(c) && !config.nacl
+ // result: (SETAE (BTQconst [log2(c)] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_0_1.AuxInt
+ if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (InvertFlags x))
// cond:
- // result: (SETLE x)
+ // result: (SETEQ x)
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64InvertFlags {
break
}
x := v_0.Args[0]
- v.reset(OpAMD64SETLE)
+ v.reset(OpAMD64SETEQ)
v.AddArg(x)
return true
}
- // match: (SETGE (FlagEQ))
+ // match: (SETEQ (FlagEQ))
// cond:
// result: (MOVLconst [1])
for {
v.AuxInt = 1
return true
}
- // match: (SETGE (FlagLT_ULT))
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
+ // match: (SETEQ (FlagLT_ULT))
// cond:
// result: (MOVLconst [0])
for {
v.AuxInt = 0
return true
}
- // match: (SETGE (FlagLT_UGT))
+ // match: (SETEQ (FlagLT_UGT))
// cond:
// result: (MOVLconst [0])
for {
v.AuxInt = 0
return true
}
- // match: (SETGE (FlagGT_ULT))
+ // match: (SETEQ (FlagGT_ULT))
// cond:
- // result: (MOVLconst [1])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.AuxInt = 0
return true
}
- // match: (SETGE (FlagGT_UGT))
+ // match: (SETEQ (FlagGT_UGT))
// cond:
- // result: (MOVLconst [1])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.AuxInt = 0
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SETGEmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETEQmem_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETGEmem [off] {sym} ptr (InvertFlags x) mem)
- // cond:
- // result: (SETLEmem [off] {sym} ptr x mem)
+ config := b.Func.Config
+ _ = config
+ // match: (SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ // cond: !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64InvertFlags {
+ if v_1.Op != OpAMD64TESTL {
break
}
- x := v_1.Args[0]
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64SHLL {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ if v_1_0_0.AuxInt != 1 {
+ break
+ }
+ x := v_1_0.Args[1]
+ y := v_1.Args[1]
mem := v.Args[2]
- v.reset(OpAMD64SETLEmem)
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off1] {sym} (ADDQconst [off2] base) val mem)
- // cond: is32Bit(off1+off2)
- // result: (SETGEmem [off1+off2] {sym} base val mem)
+ // match: (SETEQmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
+ // cond: !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTL x y) mem)
for {
- off1 := v.AuxInt
+ off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64ADDQconst {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
break
}
- off2 := v_0.AuxInt
- base := v_0.Args[0]
- val := v.Args[1]
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64SHLL {
+ break
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ if v_1_1_0.AuxInt != 1 {
+ break
+ }
+ x := v_1_1.Args[1]
mem := v.Args[2]
- if !(is32Bit(off1 + off2)) {
+ if !(!config.nacl) {
break
}
- v.reset(OpAMD64SETGEmem)
- v.AuxInt = off1 + off2
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
v.Aux = sym
- v.AddArg(base)
- v.AddArg(val)
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETGEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // match: (SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ // cond: !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off := v.AuxInt
+ sym := v.Aux
_ = v.Args[2]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64LEAQ {
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
- base := v_0.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64SHLQ {
break
}
- v.reset(OpAMD64SETGEmem)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(base)
- v.AddArg(val)
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ if v_1_0_0.AuxInt != 1 {
+ break
+ }
+ x := v_1_0.Args[1]
+ y := v_1.Args[1]
+ mem := v.Args[2]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off] {sym} ptr x:(FlagEQ) mem)
- // cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // match: (SETEQmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
+ // cond: !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTQ x y) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagEQ {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64SHLQ {
+ break
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ if v_1_1_0.AuxInt != 1 {
break
}
+ x := v_1_1.Args[1]
mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
- // cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // match: (SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem)
+ // cond: isUint32PowerOfTwo(c) && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagLT_ULT {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTLconst {
break
}
+ c := v_1.AuxInt
+ x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
+ if !(isUint32PowerOfTwo(c) && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = log2uint32(c)
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
- // cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // match: (SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem)
+ // cond: isUint64PowerOfTwo(c) && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagLT_UGT {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQconst {
break
}
+ c := v_1.AuxInt
+ x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
+ if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
- // cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // match: (SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
+ // cond: isUint64PowerOfTwo(c) && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagGT_ULT {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVQconst {
break
}
+ c := v_1_0.AuxInt
+ x := v_1.Args[1]
mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
+ if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETGEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
- // cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // match: (SETEQmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
+ // cond: isUint64PowerOfTwo(c) && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagGT_UGT {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
break
}
+ _ = v_1.Args[1]
+ x := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_1_1.AuxInt
mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
+ if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
v.AddArg(v0)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SETGmem_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (SETGmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETEQmem [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETLmem [off] {sym} ptr x mem)
+ // result: (SETEQmem [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETLmem)
+ v.reset(OpAMD64SETEQmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETGmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETEQmem [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETGmem [off1+off2] {sym} base val mem)
+ // result: (SETEQmem [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETGmem)
+ v.reset(OpAMD64SETEQmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETGmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETEQmem_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (SETEQmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETGmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETEQmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETGmem)
+ v.reset(OpAMD64SETEQmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETGmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETEQmem [off] {sym} ptr x:(FlagEQ) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETGmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETEQmem [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETGmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETEQmem [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETGmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETEQmem [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETGmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETEQmem [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool {
- // match: (SETL (InvertFlags x))
+func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool {
+ // match: (SETG (InvertFlags x))
// cond:
- // result: (SETG x)
+ // result: (SETL x)
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64InvertFlags {
break
}
x := v_0.Args[0]
- v.reset(OpAMD64SETG)
+ v.reset(OpAMD64SETL)
v.AddArg(x)
return true
}
- // match: (SETL (FlagEQ))
+ // match: (SETG (FlagEQ))
// cond:
// result: (MOVLconst [0])
for {
v.AuxInt = 0
return true
}
- // match: (SETL (FlagLT_ULT))
+ // match: (SETG (FlagLT_ULT))
// cond:
- // result: (MOVLconst [1])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.AuxInt = 0
return true
}
- // match: (SETL (FlagLT_UGT))
+ // match: (SETG (FlagLT_UGT))
// cond:
- // result: (MOVLconst [1])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.AuxInt = 0
return true
}
- // match: (SETL (FlagGT_ULT))
+ // match: (SETG (FlagGT_ULT))
// cond:
- // result: (MOVLconst [0])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = 1
return true
}
- // match: (SETL (FlagGT_UGT))
+ // match: (SETG (FlagGT_UGT))
// cond:
- // result: (MOVLconst [0])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = 1
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool {
- // match: (SETLE (InvertFlags x))
+func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool {
+ // match: (SETGE (InvertFlags x))
// cond:
- // result: (SETGE x)
+ // result: (SETLE x)
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64InvertFlags {
break
}
x := v_0.Args[0]
- v.reset(OpAMD64SETGE)
+ v.reset(OpAMD64SETLE)
v.AddArg(x)
return true
}
- // match: (SETLE (FlagEQ))
+ // match: (SETGE (FlagEQ))
// cond:
// result: (MOVLconst [1])
for {
v.AuxInt = 1
return true
}
- // match: (SETLE (FlagLT_ULT))
+ // match: (SETGE (FlagLT_ULT))
// cond:
- // result: (MOVLconst [1])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_ULT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.AuxInt = 0
return true
}
- // match: (SETLE (FlagLT_UGT))
+ // match: (SETGE (FlagLT_UGT))
// cond:
- // result: (MOVLconst [1])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagLT_UGT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.AuxInt = 0
return true
}
- // match: (SETLE (FlagGT_ULT))
+ // match: (SETGE (FlagGT_ULT))
// cond:
- // result: (MOVLconst [0])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = 1
return true
}
- // match: (SETLE (FlagGT_UGT))
+ // match: (SETGE (FlagGT_UGT))
// cond:
- // result: (MOVLconst [0])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = 1
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SETLEmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETGEmem_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETLEmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETGEmem [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETGEmem [off] {sym} ptr x mem)
+ // result: (SETLEmem [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETGEmem)
+ v.reset(OpAMD64SETLEmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETGEmem [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETLEmem [off1+off2] {sym} base val mem)
+ // result: (SETGEmem [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETLEmem)
+ v.reset(OpAMD64SETGEmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETGEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETLEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETGEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETLEmem)
+ v.reset(OpAMD64SETGEmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETGEmem [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETGEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETGEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETGEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETLEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETGEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SETLmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SETGmem_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SETLmem [off] {sym} ptr (InvertFlags x) mem)
+ // match: (SETGmem [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETGmem [off] {sym} ptr x mem)
+ // result: (SETLmem [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETGmem)
+ v.reset(OpAMD64SETLmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETLmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETGmem [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETLmem [off1+off2] {sym} base val mem)
+ // result: (SETGmem [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETLmem)
+ v.reset(OpAMD64SETGmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETLmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETGmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETLmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETGmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETLmem)
+ v.reset(OpAMD64SETGmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETLmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETGmem [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETLmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETGmem [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- // match: (SETLmem [off] {sym} ptr x:(FlagLT_UGT) mem)
- // cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- ptr := v.Args[0]
- x := v.Args[1]
- if x.Op != OpAMD64FlagLT_UGT {
- break
- }
- mem := v.Args[2]
- v.reset(OpAMD64MOVBstore)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETLmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETGmem [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
_ = v.Args[2]
ptr := v.Args[0]
x := v.Args[1]
- if x.Op != OpAMD64FlagGT_ULT {
+ if x.Op != OpAMD64FlagLT_UGT {
break
}
mem := v.Args[2]
v.AddArg(mem)
return true
}
- // match: (SETLmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETGmem [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
x := v.Args[1]
- if x.Op != OpAMD64FlagGT_UGT {
+ if x.Op != OpAMD64FlagGT_ULT {
break
}
mem := v.Args[2]
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 0
- v.AddArg(v0)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
- b := v.Block
- _ = b
- config := b.Func.Config
- _ = config
- // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
- // cond: !config.nacl
- // result: (SETB (BTL x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64SHLL {
- break
- }
- _ = v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVLconst {
- break
- }
- if v_0_0_0.AuxInt != 1 {
- break
- }
- x := v_0_0.Args[1]
- y := v_0.Args[1]
- if !(!config.nacl) {
- break
- }
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x)))
- // cond: !config.nacl
- // result: (SETB (BTL x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTL {
- break
- }
- _ = v_0.Args[1]
- y := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64SHLL {
- break
- }
- _ = v_0_1.Args[1]
- v_0_1_0 := v_0_1.Args[0]
- if v_0_1_0.Op != OpAMD64MOVLconst {
- break
- }
- if v_0_1_0.AuxInt != 1 {
- break
- }
- x := v_0_1.Args[1]
- if !(!config.nacl) {
- break
- }
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
- // cond: !config.nacl
- // result: (SETB (BTQ x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTQ {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64SHLQ {
- break
- }
- _ = v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVQconst {
- break
- }
- if v_0_0_0.AuxInt != 1 {
- break
- }
- x := v_0_0.Args[1]
- y := v_0.Args[1]
- if !(!config.nacl) {
- break
- }
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x)))
- // cond: !config.nacl
- // result: (SETB (BTQ x y))
- for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTQ {
- break
- }
- _ = v_0.Args[1]
- y := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64SHLQ {
- break
- }
- _ = v_0_1.Args[1]
- v_0_1_0 := v_0_1.Args[0]
- if v_0_1_0.Op != OpAMD64MOVQconst {
- break
- }
- if v_0_1_0.AuxInt != 1 {
- break
- }
- x := v_0_1.Args[1]
- if !(!config.nacl) {
- break
- }
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 1
v.AddArg(v0)
+ v.AddArg(mem)
return true
}
- // match: (SETNE (TESTLconst [c] x))
- // cond: isUint32PowerOfTwo(c) && !config.nacl
- // result: (SETB (BTLconst [log2uint32(c)] x))
+ // match: (SETGmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTLconst {
- break
- }
- c := v_0.AuxInt
- x := v_0.Args[0]
- if !(isUint32PowerOfTwo(c) && !config.nacl) {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = log2uint32(c)
- v0.AddArg(x)
+ mem := v.Args[2]
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 1
v.AddArg(v0)
+ v.AddArg(mem)
return true
}
- // match: (SETNE (TESTQconst [c] x))
- // cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETB (BTQconst [log2(c)] x))
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool {
+ // match: (SETL (InvertFlags x))
+ // cond:
+ // result: (SETG x)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTQconst {
+ if v_0.Op != OpAMD64InvertFlags {
break
}
- c := v_0.AuxInt
x := v_0.Args[0]
- if !(isUint64PowerOfTwo(c) && !config.nacl) {
- break
- }
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = log2(c)
- v0.AddArg(x)
- v.AddArg(v0)
+ v.reset(OpAMD64SETG)
+ v.AddArg(x)
return true
}
- // match: (SETNE (TESTQ (MOVQconst [c]) x))
- // cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETB (BTQconst [log2(c)] x))
+ // match: (SETL (FlagEQ))
+ // cond:
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTQ {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64MOVQconst {
+ if v_0.Op != OpAMD64FlagEQ {
break
}
- c := v_0_0.AuxInt
- x := v_0.Args[1]
- if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETL (FlagLT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = log2(c)
- v0.AddArg(x)
- v.AddArg(v0)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 1
return true
}
- // match: (SETNE (TESTQ x (MOVQconst [c])))
- // cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETB (BTQconst [log2(c)] x))
+ // match: (SETL (FlagLT_UGT))
+ // cond:
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
- if v_0.Op != OpAMD64TESTQ {
+ if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpAMD64MOVQconst {
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETL (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- c := v_0_1.AuxInt
- if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (SETL (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = log2(c)
- v0.AddArg(x)
- v.AddArg(v0)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
return true
}
- // match: (SETNE (InvertFlags x))
+ return false
+}
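+// SETLE: same flag-folding scheme as SETL above (and InvertFlags flips it to
+// SETGE), except that FlagEQ now yields 1, because equality satisfies
+// "less than or equal".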
+func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool {
+ // match: (SETLE (InvertFlags x))
// cond:
- // result: (SETNE x)
+ // result: (SETGE x)
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64InvertFlags {
break
}
x := v_0.Args[0]
- v.reset(OpAMD64SETNE)
+ v.reset(OpAMD64SETGE)
v.AddArg(x)
return true
}
- // match: (SETNE (FlagEQ))
+ // match: (SETLE (FlagEQ))
// cond:
- // result: (MOVLconst [0])
+ // result: (MOVLconst [1])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagEQ {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ v.AuxInt = 1
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool {
- // match: (SETNE (FlagLT_ULT))
+ // match: (SETLE (FlagLT_ULT))
// cond:
// result: (MOVLconst [1])
for {
v.AuxInt = 1
return true
}
- // match: (SETNE (FlagLT_UGT))
+ // match: (SETLE (FlagLT_UGT))
// cond:
// result: (MOVLconst [1])
for {
v.AuxInt = 1
return true
}
- // match: (SETNE (FlagGT_ULT))
+ // match: (SETLE (FlagGT_ULT))
// cond:
- // result: (MOVLconst [1])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_ULT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.AuxInt = 0
return true
}
- // match: (SETNE (FlagGT_UGT))
+ // match: (SETLE (FlagGT_UGT))
// cond:
- // result: (MOVLconst [1])
+ // result: (MOVLconst [0])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64FlagGT_UGT {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = 1
+ v.AuxInt = 0
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SETNEmem_0(v *Value) bool {
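+// SETLEmem is the store-to-memory form of SETLE. Known flag states are
+// folded into a MOVBstore of the 0/1 constant, and ADDQconst/LEAQ address
+// arithmetic is folded into the store's offset and symbol (subject to the
+// is32Bit and canMergeSym checks), as for the other SETccmem ops.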
+func rewriteValueAMD64_OpAMD64SETLEmem_0(v *Value) bool {
b := v.Block
_ = b
- config := b.Func.Config
- _ = config
- // match: (SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
- // cond: !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTL x y) mem)
+ // match: (SETLEmem [off] {sym} ptr (InvertFlags x) mem)
+ // cond:
+ // result: (SETGEmem [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTL {
- break
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64SHLL {
- break
- }
- _ = v_1_0.Args[1]
- v_1_0_0 := v_1_0.Args[0]
- if v_1_0_0.Op != OpAMD64MOVLconst {
- break
- }
- if v_1_0_0.AuxInt != 1 {
+ if v_1.Op != OpAMD64InvertFlags {
break
}
- x := v_1_0.Args[1]
- y := v_1.Args[1]
+ x := v_1.Args[0]
mem := v.Args[2]
- if !(!config.nacl) {
- break
- }
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64SETGEmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg(x)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
- // cond: !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTL x y) mem)
+ // match: (SETLEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (SETLEmem [off1+off2] {sym} base val mem)
for {
- off := v.AuxInt
+ off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTL {
- break
- }
- _ = v_1.Args[1]
- y := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64SHLL {
- break
- }
- _ = v_1_1.Args[1]
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64MOVLconst {
- break
- }
- if v_1_1_0.AuxInt != 1 {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
break
}
- x := v_1_1.Args[1]
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
mem := v.Args[2]
- if !(!config.nacl) {
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETBmem)
- v.AuxInt = off
+ v.reset(OpAMD64SETLEmem)
+ v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v.AddArg(base)
+ v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
- // cond: !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
+ // match: (SETLEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (SETLEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off1 := v.AuxInt
+ sym1 := v.Aux
_ = v.Args[2]
- ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTQ {
- break
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64SHLQ {
- break
- }
- _ = v_1_0.Args[1]
- v_1_0_0 := v_1_0.Args[0]
- if v_1_0_0.Op != OpAMD64MOVQconst {
- break
- }
- if v_1_0_0.AuxInt != 1 {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
break
}
- x := v_1_0.Args[1]
- y := v_1.Args[1]
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
mem := v.Args[2]
- if !(!config.nacl) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETBmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
- v.AddArg(v0)
+ v.reset(OpAMD64SETLEmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
- // cond: !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
+ // match: (SETLEmem [off] {sym} ptr x:(FlagEQ) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTQ {
- break
- }
- _ = v_1.Args[1]
- y := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64SHLQ {
- break
- }
- _ = v_1_1.Args[1]
- v_1_1_0 := v_1_1.Args[0]
- if v_1_1_0.Op != OpAMD64MOVQconst {
- break
- }
- if v_1_1_0.AuxInt != 1 {
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagEQ {
break
}
- x := v_1_1.Args[1]
mem := v.Args[2]
- if !(!config.nacl) {
- break
- }
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg(x)
- v0.AddArg(y)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem)
- // cond: isUint32PowerOfTwo(c) && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
+ // match: (SETLEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTLconst {
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagLT_ULT {
break
}
- c := v_1.AuxInt
- x := v_1.Args[0]
mem := v.Args[2]
- if !(isUint32PowerOfTwo(c) && !config.nacl) {
- break
- }
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = log2uint32(c)
- v0.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem)
- // cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+ // match: (SETLEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTQconst {
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagLT_UGT {
break
}
- c := v_1.AuxInt
- x := v_1.Args[0]
mem := v.Args[2]
- if !(isUint64PowerOfTwo(c) && !config.nacl) {
- break
- }
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = log2(c)
- v0.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
- // cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+ // match: (SETLEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTQ {
- break
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64MOVQconst {
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagGT_ULT {
break
}
- c := v_1_0.AuxInt
- x := v_1.Args[1]
mem := v.Args[2]
- if !(isUint64PowerOfTwo(c) && !config.nacl) {
- break
- }
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = log2(c)
- v0.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
- // cond: isUint64PowerOfTwo(c) && !config.nacl
- // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
+ // match: (SETLEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64TESTQ {
- break
- }
- _ = v_1.Args[1]
- x := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpAMD64MOVQconst {
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagGT_UGT {
break
}
- c := v_1_1.AuxInt
mem := v.Args[2]
- if !(isUint64PowerOfTwo(c) && !config.nacl) {
- break
- }
- v.reset(OpAMD64SETBmem)
+ v.reset(OpAMD64MOVBstore)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = log2(c)
- v0.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr (InvertFlags x) mem)
+ return false
+}
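+// SETLmem mirrors SETLEmem with the SETL truth table: InvertFlags flips it to
+// SETGmem, constant offsets and symbols are merged into the store, and known
+// flag states are stored as a MOVLconst 0 or 1 via MOVBstore.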
+func rewriteValueAMD64_OpAMD64SETLmem_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (SETLmem [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: (SETNEmem [off] {sym} ptr x mem)
+ // result: (SETGmem [off] {sym} ptr x mem)
for {
off := v.AuxInt
sym := v.Aux
}
x := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64SETNEmem)
+ v.reset(OpAMD64SETGmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // match: (SETLmem [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
- // result: (SETNEmem [off1+off2] {sym} base val mem)
+ // result: (SETLmem [off1+off2] {sym} base val mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SETNEmem)
+ v.reset(OpAMD64SETLmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(base)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SETNEmem_10(v *Value) bool {
- b := v.Block
- _ = b
- // match: (SETNEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // match: (SETLmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SETNEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ // result: (SETLmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SETNEmem)
+ v.reset(OpAMD64SETLmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr x:(FlagEQ) mem)
+ // match: (SETLmem [off] {sym} ptr x:(FlagEQ) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // match: (SETLmem [off] {sym} ptr x:(FlagLT_ULT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // match: (SETLmem [off] {sym} ptr x:(FlagLT_UGT) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // match: (SETLmem [off] {sym} ptr x:(FlagGT_ULT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
- // match: (SETNEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
+ // match: (SETLmem [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
- // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
off := v.AuxInt
sym := v.Aux
v.Aux = sym
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
- v0.AuxInt = 1
+ v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool {
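+// SETNE: a TEST of x against a single-bit mask, either (SHL (MOVconst [1]) k)
+// or a power-of-two TESTconst, is rewritten to a bit test. BT/BTconst copies
+// the selected bit into the carry flag, so SETB (set if CF is set) yields the
+// same boolean; for example x&(1<<7) != 0 becomes (SETB (BTQconst [7] x)).
+// log2/log2uint32 recover the bit index from the power-of-two constant.
+// These rewrites are skipped when config.nacl is set.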
+func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SHLL x (MOVQconst [c]))
- // cond:
- // result: (SHLLconst [c&31] x)
+ config := b.Func.Config
+ _ = config
+ // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
+ // cond: !config.nacl
+ // result: (SETB (BTL x y))
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
break
}
- c := v_1.AuxInt
- v.reset(OpAMD64SHLLconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHLL x (MOVLconst [c]))
- // cond:
- // result: (SHLLconst [c&31] x)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLL {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst {
break
}
- c := v_1.AuxInt
- v.reset(OpAMD64SHLLconst)
- v.AuxInt = c & 31
- v.AddArg(x)
- return true
- }
- // match: (SHLL x (ADDQconst [c] y))
- // cond: c & 31 == 0
- // result: (SHLL x y)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
+ if v_0_0_0.AuxInt != 1 {
break
}
- c := v_1.AuxInt
- y := v_1.Args[0]
- if !(c&31 == 0) {
+ x := v_0_0.Args[1]
+ y := v_0.Args[1]
+ if !(!config.nacl) {
break
}
- v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
- // cond: c & 31 == 0
- // result: (SHLL x (NEGQ <t> y))
+ // match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x)))
+ // cond: !config.nacl
+ // result: (SETB (BTL x y))
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64NEGQ {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
break
}
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDQconst {
+ _ = v_0.Args[1]
+ y := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SHLL {
break
}
- c := v_1_0.AuxInt
- y := v_1_0.Args[0]
- if !(c&31 == 0) {
+ _ = v_0_1.Args[1]
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpAMD64MOVLconst {
break
}
- v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ if v_0_1_0.AuxInt != 1 {
+ break
+ }
+ x := v_0_1.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
- // match: (SHLL x (ANDQconst [c] y))
- // cond: c & 31 == 31
- // result: (SHLL x y)
+ // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // cond: !config.nacl
+ // result: (SETB (BTQ x y))
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ANDQconst {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
break
}
- c := v_1.AuxInt
- y := v_1.Args[0]
- if !(c&31 == 31) {
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLQ {
break
}
- v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ _ = v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ if v_0_0_0.AuxInt != 1 {
+ break
+ }
+ x := v_0_0.Args[1]
+ y := v_0.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
- // cond: c & 31 == 31
- // result: (SHLL x (NEGQ <t> y))
+ // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x)))
+ // cond: !config.nacl
+ // result: (SETB (BTQ x y))
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64NEGQ {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
break
}
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDQconst {
+ _ = v_0.Args[1]
+ y := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SHLQ {
break
}
- c := v_1_0.AuxInt
- y := v_1_0.Args[0]
- if !(c&31 == 31) {
+ _ = v_0_1.Args[1]
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpAMD64MOVQconst {
break
}
- v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ if v_0_1_0.AuxInt != 1 {
+ break
+ }
+ x := v_0_1.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
- // match: (SHLL x (ADDLconst [c] y))
- // cond: c & 31 == 0
- // result: (SHLL x y)
+ // match: (SETNE (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(c) && !config.nacl
+ // result: (SETB (BTLconst [log2uint32(c)] x))
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDLconst {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTLconst {
break
}
- c := v_1.AuxInt
- y := v_1.Args[0]
- if !(c&31 == 0) {
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = log2uint32(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
- // match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
- // cond: c & 31 == 0
- // result: (SHLL x (NEGL <t> y))
+ // match: (SETNE (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(c) && !config.nacl
+ // result: (SETB (BTQconst [log2(c)] x))
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDLconst {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQconst {
break
}
- c := v_1_0.AuxInt
- y := v_1_0.Args[0]
- if !(c&31 == 0) {
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
- // match: (SHLL x (ANDLconst [c] y))
- // cond: c & 31 == 31
- // result: (SHLL x y)
+ // match: (SETNE (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c) && !config.nacl
+ // result: (SETB (BTQconst [log2(c)] x))
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ANDLconst {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
break
}
- c := v_1.AuxInt
- y := v_1.Args[0]
- if !(c&31 == 31) {
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst {
break
}
- v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v.AddArg(y)
+ c := v_0_0.AuxInt
+ x := v_0.Args[1]
+ if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
- // match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
- // cond: c & 31 == 31
- // result: (SHLL x (NEGL <t> y))
+ // match: (SETNE (TESTQ x (MOVQconst [c])))
+ // cond: isUint64PowerOfTwo(c) && !config.nacl
+ // result: (SETB (BTQconst [log2(c)] x))
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64NEGL {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
break
}
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDLconst {
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64MOVQconst {
break
}
- c := v_1_0.AuxInt
- y := v_1_0.Args[0]
- if !(c&31 == 31) {
+ c := v_0_1.AuxInt
+ if !(isUint64PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SHLL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool {
- // match: (SHLLconst x [0])
+ // match: (SETNE (InvertFlags x))
// cond:
- // result: x
+ // result: (SETNE x)
for {
- if v.AuxInt != 0 {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64InvertFlags {
break
}
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETNE)
v.AddArg(x)
return true
}
+ // match: (SETNE (FlagEQ))
+ // cond:
+ // result: (MOVLconst [0])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
+ return true
+ }
return false
}
-func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (SHLQ x (MOVQconst [c]))
+func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool {
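+	// SETNE_10 continues the SETNE rules (rulegen appears to split each op's
+	// rule set into chunks of ten): every definitely-not-equal flag state
+	// folds to the constant 1.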
+ // match: (SETNE (FlagLT_ULT))
// cond:
- // result: (SHLQconst [c&63] x)
+ // result: (MOVLconst [1])
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagLT_ULT {
break
}
- c := v_1.AuxInt
- v.reset(OpAMD64SHLQconst)
- v.AuxInt = c & 63
- v.AddArg(x)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 1
return true
}
- // match: (SHLQ x (MOVLconst [c]))
+ // match: (SETNE (FlagLT_UGT))
// cond:
- // result: (SHLQconst [c&63] x)
+ // result: (MOVLconst [1])
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagLT_UGT {
break
}
- c := v_1.AuxInt
- v.reset(OpAMD64SHLQconst)
- v.AuxInt = c & 63
- v.AddArg(x)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 1
return true
}
- // match: (SHLQ x (ADDQconst [c] y))
- // cond: c & 63 == 0
- // result: (SHLQ x y)
+ // match: (SETNE (FlagGT_ULT))
+ // cond:
+ // result: (MOVLconst [1])
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagGT_ULT {
break
}
- c := v_1.AuxInt
- y := v_1.Args[0]
- if !(c&63 == 0) {
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (SETNE (FlagGT_UGT))
+ // cond:
+ // result: (MOVLconst [1])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64FlagGT_UGT {
break
}
- v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 1
return true
}
- // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
- // cond: c & 63 == 0
- // result: (SHLQ x (NEGQ <t> y))
+ return false
+}
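+// SETNEmem applies the same single-bit TEST to BT conversion as SETNE, but
+// keeps the memory destination by producing SETBmem with the original offset,
+// symbol, pointer, and memory arguments; InvertFlags and constant-offset
+// address folding are handled here as well.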
+func rewriteValueAMD64_OpAMD64SETNEmem_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ // cond: !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTL x y) mem)
for {
- _ = v.Args[1]
- x := v.Args[0]
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64NEGQ {
+ if v_1.Op != OpAMD64TESTL {
break
}
- t := v_1.Type
+ _ = v_1.Args[1]
v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDQconst {
+ if v_1_0.Op != OpAMD64SHLL {
break
}
- c := v_1_0.AuxInt
- y := v_1_0.Args[0]
- if !(c&63 == 0) {
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst {
break
}
- v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ if v_1_0_0.AuxInt != 1 {
+ break
+ }
+ x := v_1_0.Args[1]
+ y := v_1.Args[1]
+ mem := v.Args[2]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
+ v.AddArg(mem)
return true
}
- // match: (SHLQ x (ANDQconst [c] y))
- // cond: c & 63 == 63
- // result: (SHLQ x y)
+ // match: (SETNEmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
+ // cond: !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTL x y) mem)
for {
- _ = v.Args[1]
- x := v.Args[0]
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64ANDQconst {
+ if v_1.Op != OpAMD64TESTL {
break
}
- c := v_1.AuxInt
+ _ = v_1.Args[1]
y := v_1.Args[0]
- if !(c&63 == 63) {
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64SHLL {
break
}
- v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v.AddArg(y)
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ if v_1_1_0.AuxInt != 1 {
+ break
+ }
+ x := v_1_1.Args[1]
+ mem := v.Args[2]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(mem)
return true
}
- // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
- // cond: c & 63 == 63
- // result: (SHLQ x (NEGQ <t> y))
+ // match: (SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ // cond: !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
for {
- _ = v.Args[1]
- x := v.Args[0]
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64NEGQ {
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64SHLQ {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ if v_1_0_0.AuxInt != 1 {
+ break
+ }
+ x := v_1_0.Args[1]
+ y := v_1.Args[1]
+ mem := v.Args[2]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
+ // cond: !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64SHLQ {
break
}
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDQconst {
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64MOVQconst {
break
}
- c := v_1_0.AuxInt
- y := v_1_0.Args[0]
- if !(c&63 == 63) {
+ if v_1_1_0.AuxInt != 1 {
break
}
- v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ x := v_1_1.Args[1]
+ mem := v.Args[2]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
+ v.AddArg(mem)
return true
}
- // match: (SHLQ x (ADDLconst [c] y))
- // cond: c & 63 == 0
- // result: (SHLQ x y)
+ // match: (SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem)
+ // cond: isUint32PowerOfTwo(c) && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
for {
- _ = v.Args[1]
- x := v.Args[0]
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDLconst {
+ if v_1.Op != OpAMD64TESTLconst {
break
}
c := v_1.AuxInt
- y := v_1.Args[0]
- if !(c&63 == 0) {
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ if !(isUint32PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = log2uint32(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
return true
}
- // match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
- // cond: c & 63 == 0
- // result: (SHLQ x (NEGL <t> y))
+ // match: (SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem)
+ // cond: isUint64PowerOfTwo(c) && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
for {
- _ = v.Args[1]
- x := v.Args[0]
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDLconst {
+ if v_1.Op != OpAMD64TESTQconst {
break
}
- c := v_1_0.AuxInt
- y := v_1_0.Args[0]
- if !(c&63 == 0) {
+ c := v_1.AuxInt
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ if !(isUint64PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
v.AddArg(v0)
+ v.AddArg(mem)
return true
}
- // match: (SHLQ x (ANDLconst [c] y))
- // cond: c & 63 == 63
- // result: (SHLQ x y)
+ // match: (SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
+ // cond: isUint64PowerOfTwo(c) && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
for {
- _ = v.Args[1]
- x := v.Args[0]
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64ANDLconst {
+ if v_1.Op != OpAMD64TESTQ {
break
}
- c := v_1.AuxInt
- y := v_1.Args[0]
- if !(c&63 == 63) {
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVQconst {
break
}
- v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v.AddArg(y)
+ c := v_1_0.AuxInt
+ x := v_1.Args[1]
+ mem := v.Args[2]
+ if !(isUint64PowerOfTwo(c) && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
return true
}
- // match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
- // cond: c & 63 == 63
- // result: (SHLQ x (NEGL <t> y))
+ // match: (SETNEmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
+ // cond: isUint64PowerOfTwo(c) && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
for {
- _ = v.Args[1]
- x := v.Args[0]
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64NEGL {
+ if v_1.Op != OpAMD64TESTQ {
break
}
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDLconst {
+ _ = v_1.Args[1]
+ x := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64MOVQconst {
break
}
- c := v_1_0.AuxInt
- y := v_1_0.Args[0]
- if !(c&63 == 63) {
+ c := v_1_1.AuxInt
+ mem := v.Args[2]
+ if !(isUint64PowerOfTwo(c) && !config.nacl) {
break
}
- v.reset(OpAMD64SHLQ)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = log2(c)
+ v0.AddArg(x)
v.AddArg(v0)
+ v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool {
- // match: (SHLQconst x [0])
+ // match: (SETNEmem [off] {sym} ptr (InvertFlags x) mem)
// cond:
- // result: x
+ // result: (SETNEmem [off] {sym} ptr x mem)
for {
- if v.AuxInt != 0 {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64InvertFlags {
break
}
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
+ x := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpAMD64SETNEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
v.AddArg(x)
+ v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool {
- // match: (SHRB x (MOVQconst [c]))
- // cond: c&31 < 8
- // result: (SHRBconst [c&31] x)
+ // match: (SETNEmem [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (SETNEmem [off1+off2] {sym} base val mem)
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
break
}
- c := v_1.AuxInt
- if !(c&31 < 8) {
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64SHRBconst)
- v.AuxInt = c & 31
- v.AddArg(x)
+ v.reset(OpAMD64SETNEmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
- // match: (SHRB x (MOVLconst [c]))
- // cond: c&31 < 8
- // result: (SHRBconst [c&31] x)
+ return false
+}
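+// SETNEmem_10: the remaining SETNEmem rules, covering LEAQ address folding
+// and stores of the known 0/1 result for constant flag states.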
+func rewriteValueAMD64_OpAMD64SETNEmem_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (SETNEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (SETNEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
break
}
- c := v_1.AuxInt
- if !(c&31 < 8) {
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64SHRBconst)
- v.AuxInt = c & 31
- v.AddArg(x)
+ v.reset(OpAMD64SETNEmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
- // match: (SHRB _ (MOVQconst [c]))
- // cond: c&31 >= 8
- // result: (MOVLconst [0])
+ // match: (SETNEmem [off] {sym} ptr x:(FlagEQ) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [0]) mem)
for {
- _ = v.Args[1]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagEQ {
break
}
- c := v_1.AuxInt
- if !(c&31 >= 8) {
+ mem := v.Args[2]
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 0
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr x:(FlagLT_ULT) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagLT_ULT {
break
}
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ mem := v.Args[2]
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 1
+ v.AddArg(v0)
+ v.AddArg(mem)
return true
}
- // match: (SHRB _ (MOVLconst [c]))
- // cond: c&31 >= 8
- // result: (MOVLconst [0])
+ // match: (SETNEmem [off] {sym} ptr x:(FlagLT_UGT) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
- _ = v.Args[1]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagLT_UGT {
break
}
- c := v_1.AuxInt
- if !(c&31 >= 8) {
+ mem := v.Args[2]
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 1
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr x:(FlagGT_ULT) mem)
+ // cond:
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagGT_ULT {
break
}
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
+ mem := v.Args[2]
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 1
+ v.AddArg(v0)
+ v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool {
- // match: (SHRBconst x [0])
+ // match: (SETNEmem [off] {sym} ptr x:(FlagGT_UGT) mem)
// cond:
- // result: x
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <x.Type> [1]) mem)
for {
- if v.AuxInt != 0 {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ x := v.Args[1]
+ if x.Op != OpAMD64FlagGT_UGT {
break
}
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ mem := v.Args[2]
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type)
+ v0.AuxInt = 1
+ v.AddArg(v0)
+ v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool {
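+// SHLL: a constant shift count becomes SHLLconst with the count masked to
+// 5 bits (c&31), matching what the hardware does with the count in CL. For
+// variable counts, an ADDconst by a multiple of 32, or an ANDconst that keeps
+// the low 5 bits intact, is dropped (including through a NEGQ/NEGL wrapper),
+// since the instruction masks the count anyway.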
+func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SHRL x (MOVQconst [c]))
+ // match: (SHLL x (MOVQconst [c]))
// cond:
- // result: (SHRLconst [c&31] x)
+ // result: (SHLLconst [c&31] x)
for {
_ = v.Args[1]
x := v.Args[0]
break
}
c := v_1.AuxInt
- v.reset(OpAMD64SHRLconst)
+ v.reset(OpAMD64SHLLconst)
v.AuxInt = c & 31
v.AddArg(x)
return true
}
- // match: (SHRL x (MOVLconst [c]))
+ // match: (SHLL x (MOVLconst [c]))
// cond:
- // result: (SHRLconst [c&31] x)
+ // result: (SHLLconst [c&31] x)
for {
_ = v.Args[1]
x := v.Args[0]
break
}
c := v_1.AuxInt
- v.reset(OpAMD64SHRLconst)
+ v.reset(OpAMD64SHLLconst)
v.AuxInt = c & 31
v.AddArg(x)
return true
}
- // match: (SHRL x (ADDQconst [c] y))
+ // match: (SHLL x (ADDQconst [c] y))
// cond: c & 31 == 0
- // result: (SHRL x y)
+ // result: (SHLL x y)
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&31 == 0) {
break
}
- v.reset(OpAMD64SHRL)
+ v.reset(OpAMD64SHLL)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
+ // match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
// cond: c & 31 == 0
- // result: (SHRL x (NEGQ <t> y))
+ // result: (SHLL x (NEGQ <t> y))
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&31 == 0) {
break
}
- v.reset(OpAMD64SHRL)
+ v.reset(OpAMD64SHLL)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
v.AddArg(v0)
return true
}
- // match: (SHRL x (ANDQconst [c] y))
+ // match: (SHLL x (ANDQconst [c] y))
// cond: c & 31 == 31
- // result: (SHRL x y)
+ // result: (SHLL x y)
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&31 == 31) {
break
}
- v.reset(OpAMD64SHRL)
+ v.reset(OpAMD64SHLL)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
+ // match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
// cond: c & 31 == 31
- // result: (SHRL x (NEGQ <t> y))
+ // result: (SHLL x (NEGQ <t> y))
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&31 == 31) {
break
}
- v.reset(OpAMD64SHRL)
+ v.reset(OpAMD64SHLL)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
v.AddArg(v0)
return true
}
- // match: (SHRL x (ADDLconst [c] y))
+ // match: (SHLL x (ADDLconst [c] y))
// cond: c & 31 == 0
- // result: (SHRL x y)
+ // result: (SHLL x y)
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&31 == 0) {
break
}
- v.reset(OpAMD64SHRL)
+ v.reset(OpAMD64SHLL)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
+ // match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
// cond: c & 31 == 0
- // result: (SHRL x (NEGL <t> y))
+ // result: (SHLL x (NEGL <t> y))
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&31 == 0) {
break
}
- v.reset(OpAMD64SHRL)
+ v.reset(OpAMD64SHLL)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
v.AddArg(v0)
return true
}
- // match: (SHRL x (ANDLconst [c] y))
+ // match: (SHLL x (ANDLconst [c] y))
// cond: c & 31 == 31
- // result: (SHRL x y)
+ // result: (SHLL x y)
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&31 == 31) {
break
}
- v.reset(OpAMD64SHRL)
+ v.reset(OpAMD64SHLL)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
+ // match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
// cond: c & 31 == 31
- // result: (SHRL x (NEGL <t> y))
+ // result: (SHLL x (NEGL <t> y))
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&31 == 31) {
break
}
- v.reset(OpAMD64SHRL)
+ v.reset(OpAMD64SHLL)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool {
- // match: (SHRLconst x [0])
+func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool {
+ // match: (SHLLconst x [0])
// cond:
// result: x
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool {
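+// SHLQ: 64-bit analogue of SHLL; counts are masked to 6 bits (c&63) and the
+// same redundant mask and offset eliminations apply.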
+func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SHRQ x (MOVQconst [c]))
+ // match: (SHLQ x (MOVQconst [c]))
// cond:
- // result: (SHRQconst [c&63] x)
+ // result: (SHLQconst [c&63] x)
for {
_ = v.Args[1]
x := v.Args[0]
break
}
c := v_1.AuxInt
- v.reset(OpAMD64SHRQconst)
+ v.reset(OpAMD64SHLQconst)
v.AuxInt = c & 63
v.AddArg(x)
return true
}
- // match: (SHRQ x (MOVLconst [c]))
+ // match: (SHLQ x (MOVLconst [c]))
// cond:
- // result: (SHRQconst [c&63] x)
+ // result: (SHLQconst [c&63] x)
for {
_ = v.Args[1]
x := v.Args[0]
break
}
c := v_1.AuxInt
- v.reset(OpAMD64SHRQconst)
+ v.reset(OpAMD64SHLQconst)
v.AuxInt = c & 63
v.AddArg(x)
return true
}
- // match: (SHRQ x (ADDQconst [c] y))
+ // match: (SHLQ x (ADDQconst [c] y))
// cond: c & 63 == 0
- // result: (SHRQ x y)
+ // result: (SHLQ x y)
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&63 == 0) {
break
}
- v.reset(OpAMD64SHRQ)
+ v.reset(OpAMD64SHLQ)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
+ // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
// cond: c & 63 == 0
- // result: (SHRQ x (NEGQ <t> y))
+ // result: (SHLQ x (NEGQ <t> y))
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&63 == 0) {
break
}
- v.reset(OpAMD64SHRQ)
+ v.reset(OpAMD64SHLQ)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
v.AddArg(v0)
return true
}
- // match: (SHRQ x (ANDQconst [c] y))
+ // match: (SHLQ x (ANDQconst [c] y))
// cond: c & 63 == 63
- // result: (SHRQ x y)
+ // result: (SHLQ x y)
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&63 == 63) {
break
}
- v.reset(OpAMD64SHRQ)
+ v.reset(OpAMD64SHLQ)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
+ // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
// cond: c & 63 == 63
- // result: (SHRQ x (NEGQ <t> y))
+ // result: (SHLQ x (NEGQ <t> y))
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&63 == 63) {
break
}
- v.reset(OpAMD64SHRQ)
+ v.reset(OpAMD64SHLQ)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
v.AddArg(v0)
return true
}
- // match: (SHRQ x (ADDLconst [c] y))
+ // match: (SHLQ x (ADDLconst [c] y))
// cond: c & 63 == 0
- // result: (SHRQ x y)
+ // result: (SHLQ x y)
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&63 == 0) {
break
}
- v.reset(OpAMD64SHRQ)
+ v.reset(OpAMD64SHLQ)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
+ // match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
// cond: c & 63 == 0
- // result: (SHRQ x (NEGL <t> y))
+ // result: (SHLQ x (NEGL <t> y))
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&63 == 0) {
break
}
- v.reset(OpAMD64SHRQ)
+ v.reset(OpAMD64SHLQ)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
v.AddArg(v0)
return true
}
- // match: (SHRQ x (ANDLconst [c] y))
+ // match: (SHLQ x (ANDLconst [c] y))
// cond: c & 63 == 63
- // result: (SHRQ x y)
+ // result: (SHLQ x y)
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&63 == 63) {
break
}
- v.reset(OpAMD64SHRQ)
+ v.reset(OpAMD64SHLQ)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
+ // match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
// cond: c & 63 == 63
- // result: (SHRQ x (NEGL <t> y))
+ // result: (SHLQ x (NEGL <t> y))
for {
_ = v.Args[1]
x := v.Args[0]
if !(c&63 == 63) {
break
}
- v.reset(OpAMD64SHRQ)
+ v.reset(OpAMD64SHLQ)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool {
- // match: (SHRQconst x [0])
+func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool {
+ // match: (SHLQconst x [0])
// cond:
// result: x
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool {
- // match: (SHRW x (MOVQconst [c]))
- // cond: c&31 < 16
- // result: (SHRWconst [c&31] x)
+func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool {
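+	// SHRB: byte-sized right shift. A constant count whose low five bits are
+	// below 8 becomes SHRBconst; if those bits are 8 or more, every bit of
+	// the byte is shifted out and the result folds to the constant 0.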
+ // match: (SHRB x (MOVQconst [c]))
+ // cond: c&31 < 8
+ // result: (SHRBconst [c&31] x)
for {
_ = v.Args[1]
x := v.Args[0]
break
}
c := v_1.AuxInt
- if !(c&31 < 16) {
+ if !(c&31 < 8) {
break
}
- v.reset(OpAMD64SHRWconst)
+ v.reset(OpAMD64SHRBconst)
v.AuxInt = c & 31
v.AddArg(x)
return true
}
- // match: (SHRW x (MOVLconst [c]))
- // cond: c&31 < 16
- // result: (SHRWconst [c&31] x)
+ // match: (SHRB x (MOVLconst [c]))
+ // cond: c&31 < 8
+ // result: (SHRBconst [c&31] x)
for {
_ = v.Args[1]
x := v.Args[0]
break
}
c := v_1.AuxInt
- if !(c&31 < 16) {
+ if !(c&31 < 8) {
break
}
- v.reset(OpAMD64SHRWconst)
+ v.reset(OpAMD64SHRBconst)
v.AuxInt = c & 31
v.AddArg(x)
return true
}
- // match: (SHRW _ (MOVQconst [c]))
- // cond: c&31 >= 16
+ // match: (SHRB _ (MOVQconst [c]))
+ // cond: c&31 >= 8
// result: (MOVLconst [0])
for {
_ = v.Args[1]
break
}
c := v_1.AuxInt
- if !(c&31 >= 16) {
+ if !(c&31 >= 8) {
break
}
v.reset(OpAMD64MOVLconst)
v.AuxInt = 0
return true
}
- // match: (SHRW _ (MOVLconst [c]))
- // cond: c&31 >= 16
+ // match: (SHRB _ (MOVLconst [c]))
+ // cond: c&31 >= 8
// result: (MOVLconst [0])
for {
_ = v.Args[1]
break
}
c := v_1.AuxInt
- if !(c&31 >= 16) {
+ if !(c&31 >= 8) {
break
}
v.reset(OpAMD64MOVLconst)
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool {
- // match: (SHRWconst x [0])
+func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool {
+ // match: (SHRBconst x [0])
// cond:
// result: x
for {
}
return false
}
-func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool {
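+// SHRL: 32-bit logical right shift; same constant-count masking and
+// redundant-mask elimination as SHLL above.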
+func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool {
b := v.Block
_ = b
- // match: (SUBL x (MOVLconst [c]))
+ // match: (SHRL x (MOVQconst [c]))
// cond:
- // result: (SUBLconst x [c])
+ // result: (SHRLconst [c&31] x)
for {
_ = v.Args[1]
x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
+ if v_1.Op != OpAMD64MOVQconst {
break
}
c := v_1.AuxInt
- v.reset(OpAMD64SUBLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (SUBL (MOVLconst [c]) x)
- // cond:
- // result: (NEGL (SUBLconst <v.Type> x [c]))
- for {
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64NEGL)
- v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
- v0.AuxInt = c
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
- // match: (SUBL x x)
- // cond:
- // result: (MOVLconst [0])
- for {
- _ = v.Args[1]
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = 0
- return true
- }
- // match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (SUBLmem x [off] {sym} ptr mem)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- l := v.Args[1]
- if l.Op != OpAMD64MOVLload {
- break
- }
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SUBLmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool {
- // match: (SUBLconst [c] x)
- // cond: int32(c) == 0
- // result: x
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(int32(c) == 0) {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (SUBLconst [c] x)
- // cond:
- // result: (ADDLconst [int64(int32(-c))] x)
- for {
- c := v.AuxInt
- x := v.Args[0]
- v.reset(OpAMD64ADDLconst)
- v.AuxInt = int64(int32(-c))
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = c & 31
v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpAMD64SUBLmem_0(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
- // match: (SUBLmem [off1] {sym} val (ADDQconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (SUBLmem [off1+off2] {sym} val base mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- val := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
- break
- }
- off2 := v_1.AuxInt
- base := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(OpAMD64SUBLmem)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
- return true
- }
- // match: (SUBLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[2]
- val := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64LEAQ {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- base := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpAMD64SUBLmem)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
- return true
- }
- // match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // match: (SHRL x (MOVLconst [c]))
// cond:
- // result: (SUBL x (MOVLf2i y))
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- x := v.Args[0]
- ptr := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64MOVSSstore {
- break
- }
- if v_2.AuxInt != off {
- break
- }
- if v_2.Aux != sym {
- break
- }
- _ = v_2.Args[2]
- if ptr != v_2.Args[0] {
- break
- }
- y := v_2.Args[1]
- v.reset(OpAMD64SUBL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (SUBQ x (MOVQconst [c]))
- // cond: is32Bit(c)
- // result: (SUBQconst x [c])
+ // result: (SHRLconst [c&31] x)
for {
_ = v.Args[1]
x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
+ if v_1.Op != OpAMD64MOVLconst {
break
}
c := v_1.AuxInt
- if !(is32Bit(c)) {
- break
- }
- v.reset(OpAMD64SUBQconst)
- v.AuxInt = c
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = c & 31
v.AddArg(x)
return true
}
- // match: (SUBQ (MOVQconst [c]) x)
- // cond: is32Bit(c)
- // result: (NEGQ (SUBQconst <v.Type> x [c]))
- for {
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- if !(is32Bit(c)) {
- break
- }
- v.reset(OpAMD64NEGQ)
- v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
- v0.AuxInt = c
- v0.AddArg(x)
- v.AddArg(v0)
- return true
- }
- // match: (SUBQ x x)
- // cond:
- // result: (MOVQconst [0])
- for {
- _ = v.Args[1]
- x := v.Args[0]
- if x != v.Args[1] {
- break
- }
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
- return true
- }
- // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (SUBQmem x [off] {sym} ptr mem)
+ // match: (SHRL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHRL x y)
for {
_ = v.Args[1]
x := v.Args[0]
- l := v.Args[1]
- if l.Op != OpAMD64MOVQload {
- break
- }
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SUBQmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool {
- // match: (SUBQconst [0] x)
- // cond:
- // result: x
- for {
- if v.AuxInt != 0 {
- break
- }
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (SUBQconst [c] x)
- // cond: c != -(1<<31)
- // result: (ADDQconst [-c] x)
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(c != -(1 << 31)) {
- break
- }
- v.reset(OpAMD64ADDQconst)
- v.AuxInt = -c
- v.AddArg(x)
- return true
- }
- // match: (SUBQconst (MOVQconst [d]) [c])
- // cond:
- // result: (MOVQconst [d-c])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
- break
- }
- d := v_0.AuxInt
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = d - c
- return true
- }
- // match: (SUBQconst (SUBQconst x [d]) [c])
- // cond: is32Bit(-c-d)
- // result: (ADDQconst [-c-d] x)
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SUBQconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- if !(is32Bit(-c - d)) {
- break
- }
- v.reset(OpAMD64ADDQconst)
- v.AuxInt = -c - d
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SUBQmem_0(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
- // match: (SUBQmem [off1] {sym} val (ADDQconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (SUBQmem [off1+off2] {sym} val base mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- val := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpAMD64ADDQconst {
break
}
- off2 := v_1.AuxInt
- base := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(OpAMD64SUBQmem)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
- return true
- }
- // match: (SUBQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
- for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[2]
- val := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64LEAQ {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- base := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
- break
- }
- v.reset(OpAMD64SUBQmem)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
- return true
- }
- // match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
- // cond:
- // result: (SUBQ x (MOVQf2i y))
- for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- x := v.Args[0]
- ptr := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64MOVSDstore {
- break
- }
- if v_2.AuxInt != off {
- break
- }
- if v_2.Aux != sym {
- break
- }
- _ = v_2.Args[2]
- if ptr != v_2.Args[0] {
- break
- }
- y := v_2.Args[1]
- v.reset(OpAMD64SUBQ)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool {
- // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (SUBSDmem x [off] {sym} ptr mem)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- l := v.Args[1]
- if l.Op != OpAMD64MOVSDload {
- break
- }
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
break
}
- v.reset(OpAMD64SUBSDmem)
- v.AuxInt = off
- v.Aux = sym
+ v.reset(OpAMD64SHRL)
v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SUBSDmem_0(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
- // match: (SUBSDmem [off1] {sym} val (ADDQconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (SUBSDmem [off1+off2] {sym} val base mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- val := v.Args[0]
+ // match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHRL x (NEGQ <t> y))
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
+ if v_1.Op != OpAMD64NEGQ {
break
}
- off2 := v_1.AuxInt
- base := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(off1 + off2)) {
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
break
}
- v.reset(OpAMD64SUBSDmem)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ c := v_1_0.AuxInt
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (SUBSDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // match: (SHRL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHRL x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[2]
- val := v.Args[0]
+ _ = v.Args[1]
+ x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64LEAQ {
+ if v_1.Op != OpAMD64ANDQconst {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- base := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
break
}
- v.reset(OpAMD64SUBSDmem)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.reset(OpAMD64SHRL)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
- // cond:
- // result: (SUBSD x (MOVQi2f y))
+ // match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHRL x (NEGQ <t> y))
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
- ptr := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64MOVQstore {
- break
- }
- if v_2.AuxInt != off {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64NEGQ {
break
}
- if v_2.Aux != sym {
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
break
}
- _ = v_2.Args[2]
- if ptr != v_2.Args[0] {
+ c := v_1_0.AuxInt
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
break
}
- y := v_2.Args[1]
- v.reset(OpAMD64SUBSD)
+ v.reset(OpAMD64SHRL)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(y)
v.AddArg(v0)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool {
- // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (SUBSSmem x [off] {sym} ptr mem)
+ // match: (SHRL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHRL x y)
for {
_ = v.Args[1]
x := v.Args[0]
- l := v.Args[1]
- if l.Op != OpAMD64MOVSSload {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDLconst {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
break
}
- v.reset(OpAMD64SUBSSmem)
- v.AuxInt = off
- v.Aux = sym
+ v.reset(OpAMD64SHRL)
v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SUBSSmem_0(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
- // match: (SUBSSmem [off1] {sym} val (ADDQconst [off2] base) mem)
- // cond: is32Bit(off1+off2)
- // result: (SUBSSmem [off1+off2] {sym} val base mem)
+ // match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHRL x (NEGL <t> y))
for {
- off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- val := v.Args[0]
+ _ = v.Args[1]
+ x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
+ if v_1.Op != OpAMD64NEGL {
break
}
- off2 := v_1.AuxInt
- base := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(off1 + off2)) {
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
break
}
- v.reset(OpAMD64SUBSSmem)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ c := v_1_0.AuxInt
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (SUBSSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (SUBSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // match: (SHRL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHRL x y)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[2]
- val := v.Args[0]
+ _ = v.Args[1]
+ x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64LEAQ {
+ if v_1.Op != OpAMD64ANDLconst {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- base := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
break
}
- v.reset(OpAMD64SUBSSmem)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(base)
- v.AddArg(mem)
+ v.reset(OpAMD64SHRL)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
- // cond:
- // result: (SUBSS x (MOVLi2f y))
+ // match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHRL x (NEGL <t> y))
for {
- off := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
- ptr := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64MOVLstore {
- break
- }
- if v_2.AuxInt != off {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64NEGL {
break
}
- if v_2.Aux != sym {
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
break
}
- _ = v_2.Args[2]
- if ptr != v_2.Args[0] {
+ c := v_1_0.AuxInt
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
break
}
- y := v_2.Args[1]
- v.reset(OpAMD64SUBSS)
+ v.reset(OpAMD64SHRL)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
v0.AddArg(y)
v.AddArg(v0)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool {
+ // match: (SHRLconst x [0])
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool {
b := v.Block
_ = b
- // match: (TESTB (MOVLconst [c]) x)
+ // match: (SHRQ x (MOVQconst [c]))
// cond:
- // result: (TESTBconst [c] x)
+ // result: (SHRQconst [c&63] x)
for {
_ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
break
}
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64TESTBconst)
- v.AuxInt = c
+ c := v_1.AuxInt
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = c & 63
v.AddArg(x)
return true
}
- // match: (TESTB x (MOVLconst [c]))
+ // match: (SHRQ x (MOVLconst [c]))
// cond:
- // result: (TESTBconst [c] x)
+ // result: (SHRQconst [c&63] x)
for {
_ = v.Args[1]
x := v.Args[0]
break
}
c := v_1.AuxInt
- v.reset(OpAMD64TESTBconst)
- v.AuxInt = c
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = c & 63
v.AddArg(x)
return true
}
- // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
- // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPBconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // match: (SHRQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHRQ x y)
for {
_ = v.Args[1]
- l := v.Args[0]
- if l.Op != OpAMD64MOVBload {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- l2 := v.Args[1]
- if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
break
}
- b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstmem, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = makeValAndOff(0, off)
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v.reset(OpAMD64SHRQ)
+ v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (TESTB l2 l:(MOVBload {sym} [off] ptr mem))
- // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPBconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHRQ x (NEGQ <t> y))
for {
_ = v.Args[1]
- l2 := v.Args[0]
- l := v.Args[1]
- if l.Op != OpAMD64MOVBload {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64NEGQ {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
break
}
- b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstmem, types.TypeFlags)
- v.reset(OpCopy)
+ c := v_1_0.AuxInt
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
v.AddArg(v0)
- v0.AuxInt = makeValAndOff(0, off)
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64TESTBconst_0(v *Value) bool {
- // match: (TESTBconst [-1] x)
- // cond:
- // result: (TESTB x x)
+ // match: (SHRQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHRQ x y)
for {
- if v.AuxInt != -1 {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDQconst {
break
}
- x := v.Args[0]
- v.reset(OpAMD64TESTB)
- v.AddArg(x)
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
v.AddArg(x)
+ v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (TESTL (MOVLconst [c]) x)
- // cond:
- // result: (TESTLconst [c] x)
+ // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHRQ x (NEGQ <t> y))
for {
_ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64NEGQ {
break
}
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64TESTLconst)
- v.AuxInt = c
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := v_1_0.AuxInt
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (TESTL x (MOVLconst [c]))
- // cond:
- // result: (TESTLconst [c] x)
+ // match: (SHRQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHRQ x y)
for {
_ = v.Args[1]
x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
+ if v_1.Op != OpAMD64ADDLconst {
break
}
c := v_1.AuxInt
- v.reset(OpAMD64TESTLconst)
- v.AuxInt = c
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
v.AddArg(x)
+ v.AddArg(y)
return true
}
- // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
- // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPLconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHRQ x (NEGL <t> y))
for {
_ = v.Args[1]
- l := v.Args[0]
- if l.Op != OpAMD64MOVLload {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64NEGL {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- l2 := v.Args[1]
- if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
break
}
- b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstmem, types.TypeFlags)
- v.reset(OpCopy)
+ c := v_1_0.AuxInt
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
v.AddArg(v0)
- v0.AuxInt = makeValAndOff(0, off)
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
return true
}
- // match: (TESTL l2 l:(MOVLload {sym} [off] ptr mem))
- // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPLconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // match: (SHRQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHRQ x y)
for {
_ = v.Args[1]
- l2 := v.Args[0]
- l := v.Args[1]
- if l.Op != OpAMD64MOVLload {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDLconst {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ c := v_1.AuxInt
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
break
}
- b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstmem, types.TypeFlags)
- v.reset(OpCopy)
+ v.reset(OpAMD64SHRQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHRQ x (NEGL <t> y))
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := v_1_0.AuxInt
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
v.AddArg(v0)
- v0.AuxInt = makeValAndOff(0, off)
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64TESTLconst_0(v *Value) bool {
- // match: (TESTLconst [-1] x)
+func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool {
+ // match: (SHRQconst x [0])
// cond:
- // result: (TESTL x x)
+ // result: x
for {
- if v.AuxInt != -1 {
+ if v.AuxInt != 0 {
break
}
x := v.Args[0]
- v.reset(OpAMD64TESTL)
- v.AddArg(x)
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool {
- b := v.Block
- _ = b
- // match: (TESTQ (MOVQconst [c]) x)
- // cond: is32Bit(c)
- // result: (TESTQconst [c] x)
+func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool {
+ // match: (SHRW x (MOVQconst [c]))
+ // cond: c&31 < 16
+ // result: (SHRWconst [c&31] x)
for {
_ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
break
}
- c := v_0.AuxInt
- x := v.Args[1]
- if !(is32Bit(c)) {
+ c := v_1.AuxInt
+ if !(c&31 < 16) {
break
}
- v.reset(OpAMD64TESTQconst)
- v.AuxInt = c
+ v.reset(OpAMD64SHRWconst)
+ v.AuxInt = c & 31
v.AddArg(x)
return true
}
- // match: (TESTQ x (MOVQconst [c]))
- // cond: is32Bit(c)
- // result: (TESTQconst [c] x)
+ // match: (SHRW x (MOVLconst [c]))
+ // cond: c&31 < 16
+ // result: (SHRWconst [c&31] x)
for {
_ = v.Args[1]
x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
+ if v_1.Op != OpAMD64MOVLconst {
break
}
c := v_1.AuxInt
- if !(is32Bit(c)) {
+ if !(c&31 < 16) {
break
}
- v.reset(OpAMD64TESTQconst)
- v.AuxInt = c
+ v.reset(OpAMD64SHRWconst)
+ v.AuxInt = c & 31
v.AddArg(x)
return true
}
- // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
- // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPQconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // match: (SHRW _ (MOVQconst [c]))
+ // cond: c&31 >= 16
+ // result: (MOVLconst [0])
for {
_ = v.Args[1]
- l := v.Args[0]
- if l.Op != OpAMD64MOVQload {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- l2 := v.Args[1]
- if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ c := v_1.AuxInt
+ if !(c&31 >= 16) {
break
}
- b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstmem, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = makeValAndOff(0, off)
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
return true
}
- // match: (TESTQ l2 l:(MOVQload {sym} [off] ptr mem))
- // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPQconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // match: (SHRW _ (MOVLconst [c]))
+ // cond: c&31 >= 16
+ // result: (MOVLconst [0])
for {
_ = v.Args[1]
- l2 := v.Args[0]
- l := v.Args[1]
- if l.Op != OpAMD64MOVQload {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
break
}
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ c := v_1.AuxInt
+ if !(c&31 >= 16) {
break
}
- b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstmem, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = makeValAndOff(0, off)
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64TESTQconst_0(v *Value) bool {
- // match: (TESTQconst [-1] x)
+func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool {
+ // match: (SHRWconst x [0])
// cond:
- // result: (TESTQ x x)
+ // result: x
for {
- if v.AuxInt != -1 {
+ if v.AuxInt != 0 {
break
}
x := v.Args[0]
- v.reset(OpAMD64TESTQ)
- v.AddArg(x)
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool {
b := v.Block
_ = b
- // match: (TESTW (MOVLconst [c]) x)
+ // match: (SUBL x (MOVLconst [c]))
// cond:
- // result: (TESTWconst [c] x)
+ // result: (SUBLconst x [c])
for {
_ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
break
}
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64TESTWconst)
+ c := v_1.AuxInt
+ v.reset(OpAMD64SUBLconst)
v.AuxInt = c
v.AddArg(x)
return true
}
- // match: (TESTW x (MOVLconst [c]))
+ // match: (SUBL (MOVLconst [c]) x)
// cond:
- // result: (TESTWconst [c] x)
+ // result: (NEGL (SUBLconst <v.Type> x [c]))
for {
_ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- c := v_1.AuxInt
- v.reset(OpAMD64TESTWconst)
- v.AuxInt = c
- v.AddArg(x)
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
- // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
- // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPWconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // match: (SUBL x x)
+ // cond:
+ // result: (MOVLconst [0])
for {
_ = v.Args[1]
- l := v.Args[0]
- if l.Op != OpAMD64MOVWload {
- break
- }
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- l2 := v.Args[1]
- if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ x := v.Args[0]
+ if x != v.Args[1] {
break
}
- b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstmem, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = makeValAndOff(0, off)
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
return true
}
- // match: (TESTW l2 l:(MOVWload {sym} [off] ptr mem))
- // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPWconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ // match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (SUBLmem x [off] {sym} ptr mem)
for {
_ = v.Args[1]
- l2 := v.Args[0]
+ x := v.Args[0]
l := v.Args[1]
- if l.Op != OpAMD64MOVWload {
+ if l.Op != OpAMD64MOVLload {
break
}
off := l.AuxInt
_ = l.Args[1]
ptr := l.Args[0]
mem := l.Args[1]
- if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
- break
- }
- b = l.Block
- v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstmem, types.TypeFlags)
- v.reset(OpCopy)
- v.AddArg(v0)
- v0.AuxInt = makeValAndOff(0, off)
- v0.Aux = sym
- v0.AddArg(ptr)
- v0.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64TESTWconst_0(v *Value) bool {
- // match: (TESTWconst [-1] x)
- // cond:
- // result: (TESTW x x)
- for {
- if v.AuxInt != -1 {
- break
- }
- x := v.Args[0]
- v.reset(OpAMD64TESTW)
- v.AddArg(x)
- v.AddArg(x)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool {
- // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
- // result: (XADDLlock [off1+off2] {sym} val ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- val := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
- break
- }
- off2 := v_1.AuxInt
- ptr := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(off1 + off2)) {
- break
- }
- v.reset(OpAMD64XADDLlock)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool {
- // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
- // result: (XADDQlock [off1+off2] {sym} val ptr mem)
- for {
- off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- val := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
- break
- }
- off2 := v_1.AuxInt
- ptr := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(off1 + off2)) {
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64XADDQlock)
- v.AuxInt = off1 + off2
+ v.reset(OpAMD64SUBLmem)
+ v.AuxInt = off
v.Aux = sym
- v.AddArg(val)
+ v.AddArg(x)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool {
- // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
- // result: (XCHGL [off1+off2] {sym} val ptr mem)
+func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool {
+ // match: (SUBLconst [c] x)
+ // cond: int32(c) == 0
+ // result: x
for {
- off1 := v.AuxInt
- sym := v.Aux
- _ = v.Args[2]
- val := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64ADDQconst {
- break
- }
- off2 := v_1.AuxInt
- ptr := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(off1 + off2)) {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(int32(c) == 0) {
break
}
- v.reset(OpAMD64XCHGL)
- v.AuxInt = off1 + off2
- v.Aux = sym
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
return true
}
- // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
- // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+ // match: (SUBLconst [c] x)
+ // cond:
+ // result: (ADDLconst [int64(int32(-c))] x)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
- _ = v.Args[2]
- val := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64LEAQ {
- break
- }
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
- ptr := v_1.Args[0]
- mem := v.Args[2]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
- break
- }
- v.reset(OpAMD64XCHGL)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
+ c := v.AuxInt
+ x := v.Args[0]
+ v.reset(OpAMD64ADDLconst)
+ v.AuxInt = int64(int32(-c))
+ v.AddArg(x)
return true
}
- return false
}
-func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool {
- // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
+func rewriteValueAMD64_OpAMD64SUBLmem_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (SUBLmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (XCHGQ [off1+off2] {sym} val ptr mem)
+ // result: (SUBLmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
break
}
off2 := v_1.AuxInt
- ptr := v_1.Args[0]
+ base := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64XCHGQ)
+ v.reset(OpAMD64SUBLmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
- v.AddArg(ptr)
+ v.AddArg(base)
v.AddArg(mem)
return true
}
- // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
- // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+ // match: (SUBLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (SUBLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
}
off2 := v_1.AuxInt
sym2 := v_1.Aux
- ptr := v_1.Args[0]
+ base := v_1.Args[0]
mem := v.Args[2]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64XCHGQ)
+ v.reset(OpAMD64SUBLmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
- v.AddArg(ptr)
+ v.AddArg(base)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool {
- // match: (XORL x (MOVLconst [c]))
+ // match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond:
- // result: (XORLconst [c] x)
+ // result: (SUBL x (MOVLf2i y))
for {
- _ = v.Args[1]
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := v_1.AuxInt
- v.reset(OpAMD64XORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL (MOVLconst [c]) x)
- // cond:
- // result: (XORLconst [c] x)
- for {
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
- break
- }
- c := v_0.AuxInt
- x := v.Args[1]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
- // cond: d==32-c
- // result: (ROLLconst x [c])
- for {
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHLLconst {
- break
- }
- c := v_0.AuxInt
- x := v_0.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHRLconst {
- break
- }
- d := v_1.AuxInt
- if x != v_1.Args[0] {
- break
- }
- if !(d == 32-c) {
- break
- }
- v.reset(OpAMD64ROLLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL (SHRLconst x [d]) (SHLLconst x [c]))
- // cond: d==32-c
- // result: (ROLLconst x [c])
- for {
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHRLconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHLLconst {
- break
- }
- c := v_1.AuxInt
- if x != v_1.Args[0] {
- break
- }
- if !(d == 32-c) {
- break
- }
- v.reset(OpAMD64ROLLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: d==16-c && c < 16 && t.Size() == 2
- // result: (ROLWconst x [c])
- for {
- t := v.Type
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHLLconst {
- break
- }
- c := v_0.AuxInt
- x := v_0.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHRWconst {
- break
- }
- d := v_1.AuxInt
- if x != v_1.Args[0] {
- break
- }
- if !(d == 16-c && c < 16 && t.Size() == 2) {
- break
- }
- v.reset(OpAMD64ROLWconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- // match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
- // cond: d==16-c && c < 16 && t.Size() == 2
- // result: (ROLWconst x [c])
- for {
- t := v.Type
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHRWconst {
+ ptr := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64MOVSSstore {
break
}
- d := v_0.AuxInt
- x := v_0.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHLLconst {
+ if v_2.AuxInt != off {
break
}
- c := v_1.AuxInt
- if x != v_1.Args[0] {
+ if v_2.Aux != sym {
break
}
- if !(d == 16-c && c < 16 && t.Size() == 2) {
+ _ = v_2.Args[2]
+ if ptr != v_2.Args[0] {
break
}
- v.reset(OpAMD64ROLWconst)
- v.AuxInt = c
+ y := v_2.Args[1]
+ v.reset(OpAMD64SUBL)
v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
- }
- // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: d==8-c && c < 8 && t.Size() == 1
- // result: (ROLBconst x [c])
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (SUBQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBQconst x [c])
for {
- t := v.Type
_ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHLLconst {
- break
- }
- c := v_0.AuxInt
- x := v_0.Args[0]
+ x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHRBconst {
- break
- }
- d := v_1.AuxInt
- if x != v_1.Args[0] {
+ if v_1.Op != OpAMD64MOVQconst {
break
}
- if !(d == 8-c && c < 8 && t.Size() == 1) {
+ c := v_1.AuxInt
+ if !(is32Bit(c)) {
break
}
- v.reset(OpAMD64ROLBconst)
+ v.reset(OpAMD64SUBQconst)
v.AuxInt = c
v.AddArg(x)
return true
}
- // match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
- // cond: d==8-c && c < 8 && t.Size() == 1
- // result: (ROLBconst x [c])
+ // match: (SUBQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (NEGQ (SUBQconst <v.Type> x [c]))
for {
- t := v.Type
_ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHRBconst {
- break
- }
- d := v_0.AuxInt
- x := v_0.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHLLconst {
- break
- }
- c := v_1.AuxInt
- if x != v_1.Args[0] {
+ if v_0.Op != OpAMD64MOVQconst {
break
}
- if !(d == 8-c && c < 8 && t.Size() == 1) {
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
break
}
- v.reset(OpAMD64ROLBconst)
- v.AuxInt = c
- v.AddArg(x)
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
- // match: (XORL x x)
+ // match: (SUBQ x x)
// cond:
- // result: (MOVLconst [0])
+ // result: (MOVQconst [0])
for {
_ = v.Args[1]
x := v.Args[0]
if x != v.Args[1] {
break
}
- v.reset(OpAMD64MOVLconst)
+ v.reset(OpAMD64MOVQconst)
v.AuxInt = 0
return true
}
- // match: (XORL x l:(MOVLload [off] {sym} ptr mem))
+ // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (XORLmem x [off] {sym} ptr mem)
+ // result: (SUBQmem x [off] {sym} ptr mem)
for {
_ = v.Args[1]
x := v.Args[0]
l := v.Args[1]
- if l.Op != OpAMD64MOVLload {
+ if l.Op != OpAMD64MOVQload {
break
}
off := l.AuxInt
if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64XORLmem)
+ v.reset(OpAMD64SUBQmem)
v.AuxInt = off
v.Aux = sym
v.AddArg(x)
}
return false
}
-func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool {
- // match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (XORLmem x [off] {sym} ptr mem)
+func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool {
+ // match: (SUBQconst [0] x)
+ // cond:
+ // result: x
for {
- _ = v.Args[1]
- l := v.Args[0]
- if l.Op != OpAMD64MOVLload {
- break
- }
- off := l.AuxInt
- sym := l.Aux
- _ = l.Args[1]
- ptr := l.Args[0]
- mem := l.Args[1]
- x := v.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
+ if v.AuxInt != 0 {
break
}
- v.reset(OpAMD64XORLmem)
- v.AuxInt = off
- v.Aux = sym
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool {
- // match: (XORLconst [1] (SETNE x))
- // cond:
- // result: (SETEQ x)
+ // match: (SUBQconst [c] x)
+ // cond: c != -(1<<31)
+ // result: (ADDQconst [-c] x)
for {
- if v.AuxInt != 1 {
- break
- }
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SETNE {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(c != -(1 << 31)) {
break
}
- x := v_0.Args[0]
- v.reset(OpAMD64SETEQ)
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = -c
v.AddArg(x)
return true
}
- // match: (XORLconst [1] (SETEQ x))
+ // match: (SUBQconst (MOVQconst [d]) [c])
// cond:
- // result: (SETNE x)
+ // result: (MOVQconst [d-c])
for {
- if v.AuxInt != 1 {
- break
- }
+ c := v.AuxInt
v_0 := v.Args[0]
- if v_0.Op != OpAMD64SETEQ {
+ if v_0.Op != OpAMD64MOVQconst {
break
}
- x := v_0.Args[0]
- v.reset(OpAMD64SETNE)
- v.AddArg(x)
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = d - c
return true
}
- // match: (XORLconst [1] (SETL x))
- // cond:
- // result: (SETGE x)
+ // match: (SUBQconst (SUBQconst x [d]) [c])
+ // cond: is32Bit(-c-d)
+ // result: (ADDQconst [-c-d] x)
for {
- if v.AuxInt != 1 {
- break
- }
+ c := v.AuxInt
v_0 := v.Args[0]
- if v_0.Op != OpAMD64SETL {
+ if v_0.Op != OpAMD64SUBQconst {
break
}
+ d := v_0.AuxInt
x := v_0.Args[0]
- v.reset(OpAMD64SETGE)
+ if !(is32Bit(-c - d)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = -c - d
v.AddArg(x)
return true
}
- // match: (XORLconst [1] (SETGE x))
- // cond:
- // result: (SETL x)
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQmem_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (SUBQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (SUBQmem [off1+off2] {sym} val base mem)
for {
- if v.AuxInt != 1 {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SETGE {
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
break
}
- x := v_0.Args[0]
- v.reset(OpAMD64SETL)
- v.AddArg(x)
+ v.reset(OpAMD64SUBQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
return true
}
- // match: (XORLconst [1] (SETLE x))
- // cond:
- // result: (SETG x)
+ // match: (SUBQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (SUBQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
- if v.AuxInt != 1 {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SETLE {
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- x := v_0.Args[0]
- v.reset(OpAMD64SETG)
- v.AddArg(x)
+ v.reset(OpAMD64SUBQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
return true
}
- // match: (XORLconst [1] (SETG x))
+ // match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// cond:
- // result: (SETLE x)
+ // result: (SUBQ x (MOVQf2i y))
for {
- if v.AuxInt != 1 {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ x := v.Args[0]
+ ptr := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64MOVSDstore {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SETG {
+ if v_2.AuxInt != off {
break
}
- x := v_0.Args[0]
- v.reset(OpAMD64SETLE)
- v.AddArg(x)
- return true
- }
- // match: (XORLconst [1] (SETB x))
- // cond:
- // result: (SETAE x)
- for {
- if v.AuxInt != 1 {
+ if v_2.Aux != sym {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SETB {
+ _ = v_2.Args[2]
+ if ptr != v_2.Args[0] {
break
}
- x := v_0.Args[0]
- v.reset(OpAMD64SETAE)
+ y := v_2.Args[1]
+ v.reset(OpAMD64SUBQ)
v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (XORLconst [1] (SETAE x))
- // cond:
- // result: (SETB x)
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool {
+ // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (SUBSDmem x [off] {sym} ptr mem)
for {
- if v.AuxInt != 1 {
+ _ = v.Args[1]
+ x := v.Args[0]
+ l := v.Args[1]
+ if l.Op != OpAMD64MOVSDload {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SETAE {
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- x := v_0.Args[0]
- v.reset(OpAMD64SETB)
+ v.reset(OpAMD64SUBSDmem)
+ v.AuxInt = off
+ v.Aux = sym
v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (XORLconst [1] (SETBE x))
- // cond:
- // result: (SETA x)
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSDmem_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (SUBSDmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (SUBSDmem [off1+off2] {sym} val base mem)
for {
- if v.AuxInt != 1 {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SETBE {
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
break
}
- x := v_0.Args[0]
- v.reset(OpAMD64SETA)
- v.AddArg(x)
+ v.reset(OpAMD64SUBSDmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
return true
}
- // match: (XORLconst [1] (SETA x))
- // cond:
- // result: (SETBE x)
+ // match: (SUBSDmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (SUBSDmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
- if v.AuxInt != 1 {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
break
}
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SETA {
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- x := v_0.Args[0]
- v.reset(OpAMD64SETBE)
- v.AddArg(x)
+ v.reset(OpAMD64SUBSDmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool {
- // match: (XORLconst [c] (XORLconst [d] x))
+ // match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// cond:
- // result: (XORLconst [c ^ d] x)
+ // result: (SUBSD x (MOVQi2f y))
for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64XORLconst {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ x := v.Args[0]
+ ptr := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64MOVQstore {
break
}
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpAMD64XORLconst)
- v.AuxInt = c ^ d
+ if v_2.AuxInt != off {
+ break
+ }
+ if v_2.Aux != sym {
+ break
+ }
+ _ = v_2.Args[2]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ y := v_2.Args[1]
+ v.reset(OpAMD64SUBSD)
v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
- // match: (XORLconst [c] x)
- // cond: int32(c)==0
- // result: x
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool {
+ // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (SUBSSmem x [off] {sym} ptr mem)
for {
- c := v.AuxInt
+ _ = v.Args[1]
x := v.Args[0]
- if !(int32(c) == 0) {
+ l := v.Args[1]
+ if l.Op != OpAMD64MOVSSload {
break
}
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (XORLconst [c] (MOVLconst [d]))
- // cond:
- // result: (MOVLconst [c^d])
- for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVLconst {
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- d := v_0.AuxInt
- v.reset(OpAMD64MOVLconst)
- v.AuxInt = c ^ d
+ v.reset(OpAMD64SUBSSmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64XORLmem_0(v *Value) bool {
+func rewriteValueAMD64_OpAMD64SUBSSmem_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (XORLmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // match: (SUBSSmem [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
- // result: (XORLmem [off1+off2] {sym} val base mem)
+ // result: (SUBSSmem [off1+off2] {sym} val base mem)
for {
off1 := v.AuxInt
sym := v.Aux
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64XORLmem)
+ v.reset(OpAMD64SUBSSmem)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (XORLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // match: (SUBSSmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (XORLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ // result: (SUBSSmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
off1 := v.AuxInt
sym1 := v.Aux
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64XORLmem)
+ v.reset(OpAMD64SUBSSmem)
v.AuxInt = off1 + off2
v.Aux = mergeSym(sym1, sym2)
v.AddArg(val)
v.AddArg(mem)
return true
}
- // match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// cond:
- // result: (XORL x (MOVLf2i y))
+ // result: (SUBSS x (MOVLi2f y))
for {
off := v.AuxInt
sym := v.Aux
x := v.Args[0]
ptr := v.Args[1]
v_2 := v.Args[2]
- if v_2.Op != OpAMD64MOVSSstore {
+ if v_2.Op != OpAMD64MOVLstore {
break
}
if v_2.AuxInt != off {
if ptr != v_2.Args[0] {
break
}
- y := v_2.Args[1]
- v.reset(OpAMD64XORL)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
- v0.AddArg(y)
+ y := v_2.Args[1]
+ v.reset(OpAMD64SUBSS)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (TESTB (MOVLconst [c]) x)
+ // cond:
+ // result: (TESTBconst [c] x)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpAMD64TESTBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (TESTB x (MOVLconst [c]))
+ // cond:
+ // result: (TESTBconst [c] x)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpAMD64TESTBconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
+ // result: @l.Block (CMPBconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ for {
+ _ = v.Args[1]
+ l := v.Args[0]
+ if l.Op != OpAMD64MOVBload {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ l2 := v.Args[1]
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstmem, types.TypeFlags)
+ v.reset(OpCopy)
v.AddArg(v0)
+ v0.AuxInt = makeValAndOff(0, off)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool {
- // match: (XORQ x (MOVQconst [c]))
- // cond: is32Bit(c)
- // result: (XORQconst [c] x)
+ // match: (TESTB l2 l:(MOVBload {sym} [off] ptr mem))
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
+ // result: @l.Block (CMPBconstmem {sym} [makeValAndOff(0,off)] ptr mem)
for {
_ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64MOVQconst {
+ l2 := v.Args[0]
+ l := v.Args[1]
+ if l.Op != OpAMD64MOVBload {
break
}
- c := v_1.AuxInt
- if !(is32Bit(c)) {
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
break
}
- v.reset(OpAMD64XORQconst)
- v.AuxInt = c
- v.AddArg(x)
+ b = l.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBconstmem, types.TypeFlags)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = makeValAndOff(0, off)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
- // match: (XORQ (MOVQconst [c]) x)
- // cond: is32Bit(c)
- // result: (XORQconst [c] x)
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (TESTL (MOVLconst [c]) x)
+ // cond:
+ // result: (TESTLconst [c] x)
for {
_ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
c := v_0.AuxInt
x := v.Args[1]
- if !(is32Bit(c)) {
- break
- }
- v.reset(OpAMD64XORQconst)
+ v.reset(OpAMD64TESTLconst)
v.AuxInt = c
v.AddArg(x)
return true
}
- // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
- // cond: d==64-c
- // result: (ROLQconst x [c])
+ // match: (TESTL x (MOVLconst [c]))
+ // cond:
+ // result: (TESTLconst [c] x)
for {
_ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHLQconst {
- break
- }
- c := v_0.AuxInt
- x := v_0.Args[0]
+ x := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHRQconst {
+ if v_1.Op != OpAMD64MOVLconst {
break
}
- d := v_1.AuxInt
- if x != v_1.Args[0] {
+ c := v_1.AuxInt
+ v.reset(OpAMD64TESTLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
+ // result: @l.Block (CMPLconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ for {
+ _ = v.Args[1]
+ l := v.Args[0]
+ if l.Op != OpAMD64MOVLload {
break
}
- if !(d == 64-c) {
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ l2 := v.Args[1]
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
break
}
- v.reset(OpAMD64ROLQconst)
- v.AuxInt = c
- v.AddArg(x)
+ b = l.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstmem, types.TypeFlags)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = makeValAndOff(0, off)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
- // match: (XORQ (SHRQconst x [d]) (SHLQconst x [c]))
- // cond: d==64-c
- // result: (ROLQconst x [c])
+ // match: (TESTL l2 l:(MOVLload {sym} [off] ptr mem))
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
+ // result: @l.Block (CMPLconstmem {sym} [makeValAndOff(0,off)] ptr mem)
for {
_ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64SHRQconst {
+ l2 := v.Args[0]
+ l := v.Args[1]
+ if l.Op != OpAMD64MOVLload {
break
}
- d := v_0.AuxInt
- x := v_0.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpAMD64SHLQconst {
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
break
}
- c := v_1.AuxInt
- if x != v_1.Args[0] {
+ b = l.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconstmem, types.TypeFlags)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = makeValAndOff(0, off)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (TESTQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (TESTQconst [c] x)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
break
}
- if !(d == 64-c) {
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
break
}
- v.reset(OpAMD64ROLQconst)
+ v.reset(OpAMD64TESTQconst)
v.AuxInt = c
v.AddArg(x)
return true
}
- // match: (XORQ x x)
- // cond:
- // result: (MOVQconst [0])
+ // match: (TESTQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (TESTQconst [c] x)
for {
_ = v.Args[1]
x := v.Args[0]
- if x != v.Args[1] {
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
break
}
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = 0
+ c := v_1.AuxInt
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64TESTQconst)
+ v.AuxInt = c
+ v.AddArg(x)
return true
}
- // match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (XORQmem x [off] {sym} ptr mem)
+ // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
+ // result: @l.Block (CMPQconstmem {sym} [makeValAndOff(0,off)] ptr mem)
for {
_ = v.Args[1]
- x := v.Args[0]
- l := v.Args[1]
+ l := v.Args[0]
if l.Op != OpAMD64MOVQload {
break
}
_ = l.Args[1]
ptr := l.Args[0]
mem := l.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
+ l2 := v.Args[1]
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
break
}
- v.reset(OpAMD64XORQmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ b = l.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstmem, types.TypeFlags)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = makeValAndOff(0, off)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
- // match: (XORQ l:(MOVQload [off] {sym} ptr mem) x)
- // cond: canMergeLoad(v, l, x) && clobber(l)
- // result: (XORQmem x [off] {sym} ptr mem)
+ // match: (TESTQ l2 l:(MOVQload {sym} [off] ptr mem))
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
+ // result: @l.Block (CMPQconstmem {sym} [makeValAndOff(0,off)] ptr mem)
for {
_ = v.Args[1]
- l := v.Args[0]
+ l2 := v.Args[0]
+ l := v.Args[1]
if l.Op != OpAMD64MOVQload {
break
}
_ = l.Args[1]
ptr := l.Args[0]
mem := l.Args[1]
- x := v.Args[1]
- if !(canMergeLoad(v, l, x) && clobber(l)) {
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
break
}
- v.reset(OpAMD64XORQmem)
- v.AuxInt = off
- v.Aux = sym
- v.AddArg(x)
- v.AddArg(ptr)
- v.AddArg(mem)
+ b = l.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconstmem, types.TypeFlags)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = makeValAndOff(0, off)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool {
- // match: (XORQconst [c] (XORQconst [d] x))
+func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (TESTW (MOVLconst [c]) x)
// cond:
- // result: (XORQconst [c ^ d] x)
+ // result: (TESTWconst [c] x)
for {
- c := v.AuxInt
+ _ = v.Args[1]
v_0 := v.Args[0]
- if v_0.Op != OpAMD64XORQconst {
+ if v_0.Op != OpAMD64MOVLconst {
break
}
- d := v_0.AuxInt
- x := v_0.Args[0]
- v.reset(OpAMD64XORQconst)
- v.AuxInt = c ^ d
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpAMD64TESTWconst)
+ v.AuxInt = c
v.AddArg(x)
return true
}
- // match: (XORQconst [0] x)
+ // match: (TESTW x (MOVLconst [c]))
// cond:
- // result: x
+ // result: (TESTWconst [c] x)
for {
- if v.AuxInt != 0 {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
break
}
- x := v.Args[0]
- v.reset(OpCopy)
- v.Type = x.Type
+ c := v_1.AuxInt
+ v.reset(OpAMD64TESTWconst)
+ v.AuxInt = c
v.AddArg(x)
return true
}
- // match: (XORQconst [c] (MOVQconst [d]))
- // cond:
- // result: (MOVQconst [c^d])
+ // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
+ // result: @l.Block (CMPWconstmem {sym} [makeValAndOff(0,off)] ptr mem)
for {
- c := v.AuxInt
- v_0 := v.Args[0]
- if v_0.Op != OpAMD64MOVQconst {
+ _ = v.Args[1]
+ l := v.Args[0]
+ if l.Op != OpAMD64MOVWload {
break
}
- d := v_0.AuxInt
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = c ^ d
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ l2 := v.Args[1]
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstmem, types.TypeFlags)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = makeValAndOff(0, off)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ return true
+ }
+ // match: (TESTW l2 l:(MOVWload {sym} [off] ptr mem))
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
+ // result: @l.Block (CMPWconstmem {sym} [makeValAndOff(0,off)] ptr mem)
+ for {
+ _ = v.Args[1]
+ l2 := v.Args[0]
+ l := v.Args[1]
+ if l.Op != OpAMD64MOVWload {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWconstmem, types.TypeFlags)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = makeValAndOff(0, off)
+ v0.Aux = sym
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAMD64XORQmem_0(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
- // match: (XORQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool {
+ // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
- // result: (XORQmem [off1+off2] {sym} val base mem)
+ // result: (XADDLlock [off1+off2] {sym} val ptr mem)
for {
off1 := v.AuxInt
sym := v.Aux
break
}
off2 := v_1.AuxInt
- base := v_1.Args[0]
+ ptr := v_1.Args[0]
mem := v.Args[2]
if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64XORQmem)
+ v.reset(OpAMD64XADDLlock)
v.AuxInt = off1 + off2
v.Aux = sym
v.AddArg(val)
- v.AddArg(base)
+ v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (XORQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
- // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
- // result: (XORQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ return false
+}
+func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool {
+ // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (XADDQlock [off1+off2] {sym} val ptr mem)
for {
off1 := v.AuxInt
- sym1 := v.Aux
+ sym := v.Aux
_ = v.Args[2]
val := v.Args[0]
v_1 := v.Args[1]
- if v_1.Op != OpAMD64LEAQ {
+ if v_1.Op != OpAMD64ADDQconst {
break
}
off2 := v_1.AuxInt
- sym2 := v_1.Aux
- base := v_1.Args[0]
+ ptr := v_1.Args[0]
mem := v.Args[2]
- if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64XORQmem)
+ v.reset(OpAMD64XADDQlock)
v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.Aux = sym
v.AddArg(val)
- v.AddArg(base)
+ v.AddArg(ptr)
v.AddArg(mem)
return true
}
- // match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
- // cond:
- // result: (XORQ x (MOVQf2i y))
+ return false
+}
+func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool {
+ // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (XCHGL [off1+off2] {sym} val ptr mem)
for {
- off := v.AuxInt
+ off1 := v.AuxInt
sym := v.Aux
_ = v.Args[2]
- x := v.Args[0]
- ptr := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64MOVSDstore {
- break
- }
- if v_2.AuxInt != off {
- break
- }
- if v_2.Aux != sym {
- break
- }
- _ = v_2.Args[2]
- if ptr != v_2.Args[0] {
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
break
}
- y := v_2.Args[1]
- v.reset(OpAMD64XORQ)
- v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
- v0.AddArg(y)
- v.AddArg(v0)
- return true
- }
- return false
-}
-func rewriteValueAMD64_OpAdd16_0(v *Value) bool {
- // match: (Add16 x y)
- // cond:
- // result: (ADDL x y)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd32_0(v *Value) bool {
- // match: (Add32 x y)
- // cond:
- // result: (ADDL x y)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd32F_0(v *Value) bool {
- // match: (Add32F x y)
- // cond:
- // result: (ADDSS x y)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDSS)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd64_0(v *Value) bool {
- // match: (Add64 x y)
- // cond:
- // result: (ADDQ x y)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDQ)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd64F_0(v *Value) bool {
- // match: (Add64F x y)
- // cond:
- // result: (ADDSD x y)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDSD)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAdd8_0(v *Value) bool {
- // match: (Add8 x y)
- // cond:
- // result: (ADDL x y)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
- b := v.Block
- _ = b
- config := b.Func.Config
- _ = config
- // match: (AddPtr x y)
- // cond: config.PtrSize == 8
- // result: (ADDQ x y)
- for {
- _ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- if !(config.PtrSize == 8) {
+ off2 := v_1.AuxInt
+ ptr := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64ADDQ)
- v.AddArg(x)
- v.AddArg(y)
+ v.reset(OpAMD64XCHGL)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (AddPtr x y)
- // cond: config.PtrSize == 4
- // result: (ADDL x y)
+ // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
+ // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
for {
- _ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- if !(config.PtrSize == 4) {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
break
}
- v.reset(OpAMD64ADDL)
- v.AddArg(x)
- v.AddArg(y)
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64XCHGL)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAddr_0(v *Value) bool {
- b := v.Block
- _ = b
- config := b.Func.Config
- _ = config
- // match: (Addr {sym} base)
- // cond: config.PtrSize == 8
- // result: (LEAQ {sym} base)
+func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool {
+ // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (XCHGQ [off1+off2] {sym} val ptr mem)
for {
+ off1 := v.AuxInt
sym := v.Aux
- base := v.Args[0]
- if !(config.PtrSize == 8) {
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
break
}
- v.reset(OpAMD64LEAQ)
+ off2 := v_1.AuxInt
+ ptr := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64XCHGQ)
+ v.AuxInt = off1 + off2
v.Aux = sym
- v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (Addr {sym} base)
- // cond: config.PtrSize == 4
- // result: (LEAL {sym} base)
+ // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
+ // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
for {
- sym := v.Aux
- base := v.Args[0]
- if !(config.PtrSize == 4) {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
break
}
- v.reset(OpAMD64LEAL)
- v.Aux = sym
- v.AddArg(base)
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ ptr := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64XCHGQ)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
return false
}
-func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
- // match: (And16 x y)
+func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool {
+ // match: (XORL x (MOVLconst [c]))
// cond:
- // result: (ANDL x y)
+ // result: (XORLconst [c] x)
for {
_ = v.Args[1]
x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c
v.AddArg(x)
- v.AddArg(y)
return true
}
-}
-func rewriteValueAMD64_OpAnd32_0(v *Value) bool {
- // match: (And32 x y)
+ // match: (XORL (MOVLconst [c]) x)
// cond:
- // result: (ANDL x y)
+ // result: (XORLconst [c] x)
for {
_ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c
v.AddArg(x)
- v.AddArg(y)
return true
}
-}
-func rewriteValueAMD64_OpAnd64_0(v *Value) bool {
- // match: (And64 x y)
- // cond:
- // result: (ANDQ x y)
+ // match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
+ // cond: d==32-c
+ // result: (ROLLconst x [c])
for {
_ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDQ)
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHRLconst {
+ break
+ }
+ d := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(d == 32-c) {
+ break
+ }
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = c
v.AddArg(x)
- v.AddArg(y)
return true
}
-}
-func rewriteValueAMD64_OpAnd8_0(v *Value) bool {
- // match: (And8 x y)
- // cond:
- // result: (ANDL x y)
+ // match: (XORL (SHRLconst x [d]) (SHLLconst x [c]))
+ // cond: d==32-c
+ // result: (ROLLconst x [c])
for {
_ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(d == 32-c) {
+ break
+ }
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = c
v.AddArg(x)
- v.AddArg(y)
return true
}
-}
-func rewriteValueAMD64_OpAndB_0(v *Value) bool {
- // match: (AndB x y)
- // cond:
- // result: (ANDL x y)
+ // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: d==16-c && c < 16 && t.Size() == 2
+ // result: (ROLWconst x [c])
for {
+ t := v.Type
_ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64ANDL)
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHRWconst {
+ break
+ }
+ d := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(d == 16-c && c < 16 && t.Size() == 2) {
+ break
+ }
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = c
v.AddArg(x)
- v.AddArg(y)
- return true
- }
-}
-func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
- // match: (AtomicAdd32 ptr val mem)
- // cond:
- // result: (AddTupleFirst32 val (XADDLlock val ptr mem))
- for {
- _ = v.Args[2]
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64AddTupleFirst32)
- v.AddArg(val)
- v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
- // match: (AtomicAdd64 ptr val mem)
- // cond:
- // result: (AddTupleFirst64 val (XADDQlock val ptr mem))
- for {
- _ = v.Args[2]
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64AddTupleFirst64)
- v.AddArg(val)
- v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
- return true
- }
-}
-func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool {
- // match: (AtomicAnd8 ptr val mem)
- // cond:
- // result: (ANDBlock ptr val mem)
- for {
- _ = v.Args[2]
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64ANDBlock)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
return true
}
-}
-func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool {
- // match: (AtomicCompareAndSwap32 ptr old new_ mem)
- // cond:
- // result: (CMPXCHGLlock ptr old new_ mem)
+ // match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
+ // cond: d==16-c && c < 16 && t.Size() == 2
+ // result: (ROLWconst x [c])
for {
- _ = v.Args[3]
- ptr := v.Args[0]
- old := v.Args[1]
- new_ := v.Args[2]
- mem := v.Args[3]
- v.reset(OpAMD64CMPXCHGLlock)
- v.AddArg(ptr)
- v.AddArg(old)
- v.AddArg(new_)
- v.AddArg(mem)
+ t := v.Type
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHRWconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(d == 16-c && c < 16 && t.Size() == 2) {
+ break
+ }
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = c
+ v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool {
- // match: (AtomicCompareAndSwap64 ptr old new_ mem)
- // cond:
- // result: (CMPXCHGQlock ptr old new_ mem)
+ // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: d==8-c && c < 8 && t.Size() == 1
+ // result: (ROLBconst x [c])
for {
- _ = v.Args[3]
- ptr := v.Args[0]
- old := v.Args[1]
- new_ := v.Args[2]
- mem := v.Args[3]
- v.reset(OpAMD64CMPXCHGQlock)
- v.AddArg(ptr)
- v.AddArg(old)
- v.AddArg(new_)
- v.AddArg(mem)
+ t := v.Type
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHRBconst {
+ break
+ }
+ d := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(d == 8-c && c < 8 && t.Size() == 1) {
+ break
+ }
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = c
+ v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool {
- // match: (AtomicExchange32 ptr val mem)
- // cond:
- // result: (XCHGL val ptr mem)
+ // match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
+ // cond: d==8-c && c < 8 && t.Size() == 1
+ // result: (ROLBconst x [c])
for {
- _ = v.Args[2]
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64XCHGL)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
+ t := v.Type
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHRBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLLconst {
+ break
+ }
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(d == 8-c && c < 8 && t.Size() == 1) {
+ break
+ }
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = c
+ v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool {
- // match: (AtomicExchange64 ptr val mem)
+ // match: (XORL x x)
// cond:
- // result: (XCHGQ val ptr mem)
+ // result: (MOVLconst [0])
for {
- _ = v.Args[2]
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64XCHGQ)
- v.AddArg(val)
- v.AddArg(ptr)
- v.AddArg(mem)
+ _ = v.Args[1]
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = 0
return true
}
-}
-func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool {
- // match: (AtomicLoad32 ptr mem)
- // cond:
- // result: (MOVLatomicload ptr mem)
+ // match: (XORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (XORLmem x [off] {sym} ptr mem)
for {
_ = v.Args[1]
- ptr := v.Args[0]
- mem := v.Args[1]
- v.reset(OpAMD64MOVLatomicload)
+ x := v.Args[0]
+ l := v.Args[1]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64XORLmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
+ return false
}
-func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool {
- // match: (AtomicLoad64 ptr mem)
- // cond:
- // result: (MOVQatomicload ptr mem)
+func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool {
+ // match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (XORLmem x [off] {sym} ptr mem)
for {
_ = v.Args[1]
- ptr := v.Args[0]
- mem := v.Args[1]
- v.reset(OpAMD64MOVQatomicload)
+ l := v.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ x := v.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64XORLmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(x)
v.AddArg(ptr)
v.AddArg(mem)
return true
}
+ return false
}
-func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
- b := v.Block
- _ = b
- config := b.Func.Config
- _ = config
- // match: (AtomicLoadPtr ptr mem)
- // cond: config.PtrSize == 8
- // result: (MOVQatomicload ptr mem)
+func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool {
+ // match: (XORLconst [1] (SETNE x))
+ // cond:
+ // result: (SETEQ x)
for {
- _ = v.Args[1]
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(config.PtrSize == 8) {
+ if v.AuxInt != 1 {
break
}
- v.reset(OpAMD64MOVQatomicload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SETNE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETEQ)
+ v.AddArg(x)
return true
}
- // match: (AtomicLoadPtr ptr mem)
- // cond: config.PtrSize == 4
- // result: (MOVLatomicload ptr mem)
+ // match: (XORLconst [1] (SETEQ x))
+ // cond:
+ // result: (SETNE x)
for {
- _ = v.Args[1]
- ptr := v.Args[0]
- mem := v.Args[1]
- if !(config.PtrSize == 4) {
+ if v.AuxInt != 1 {
break
}
- v.reset(OpAMD64MOVLatomicload)
- v.AddArg(ptr)
- v.AddArg(mem)
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SETEQ {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETNE)
+ v.AddArg(x)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
- // match: (AtomicOr8 ptr val mem)
+ // match: (XORLconst [1] (SETL x))
// cond:
- // result: (ORBlock ptr val mem)
+ // result: (SETGE x)
for {
- _ = v.Args[2]
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpAMD64ORBlock)
- v.AddArg(ptr)
- v.AddArg(val)
- v.AddArg(mem)
+ if v.AuxInt != 1 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SETL {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETGE)
+ v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
- // match: (AtomicStore32 ptr val mem)
+ // match: (XORLconst [1] (SETGE x))
// cond:
- // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
+ // result: (SETL x)
for {
- _ = v.Args[2]
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ if v.AuxInt != 1 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SETGE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETL)
+ v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
- // match: (AtomicStore64 ptr val mem)
+ // match: (XORLconst [1] (SETLE x))
// cond:
- // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
+ // result: (SETG x)
for {
- _ = v.Args[2]
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ if v.AuxInt != 1 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SETLE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETG)
+ v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
- b := v.Block
- _ = b
- config := b.Func.Config
- _ = config
- typ := &b.Func.Config.Types
- _ = typ
- // match: (AtomicStorePtrNoWB ptr val mem)
- // cond: config.PtrSize == 8
- // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
+ // match: (XORLconst [1] (SETG x))
+ // cond:
+ // result: (SETLE x)
for {
- _ = v.Args[2]
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(config.PtrSize == 8) {
+ if v.AuxInt != 1 {
break
}
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SETG {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETLE)
+ v.AddArg(x)
return true
}
- // match: (AtomicStorePtrNoWB ptr val mem)
- // cond: config.PtrSize == 4
- // result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
+ // match: (XORLconst [1] (SETB x))
+ // cond:
+ // result: (SETAE x)
for {
- _ = v.Args[2]
- ptr := v.Args[0]
- val := v.Args[1]
- mem := v.Args[2]
- if !(config.PtrSize == 4) {
+ if v.AuxInt != 1 {
break
}
- v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SETB {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETAE)
+ v.AddArg(x)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
- // match: (Avg64u x y)
+ // match: (XORLconst [1] (SETAE x))
// cond:
- // result: (AVGQU x y)
+ // result: (SETB x)
for {
- _ = v.Args[1]
- x := v.Args[0]
- y := v.Args[1]
- v.reset(OpAMD64AVGQU)
+ if v.AuxInt != 1 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SETAE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETB)
v.AddArg(x)
- v.AddArg(y)
return true
}
-}
-func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
- // match: (BitLen32 x)
+ // match: (XORLconst [1] (SETBE x))
// cond:
- // result: (BitLen64 (MOVLQZX <typ.UInt64> x))
+ // result: (SETA x)
for {
- x := v.Args[0]
- v.reset(OpBitLen64)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
- v0.AddArg(x)
- v.AddArg(v0)
+ if v.AuxInt != 1 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SETBE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETA)
+ v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
- // match: (BitLen64 <t> x)
+ // match: (XORLconst [1] (SETA x))
// cond:
- // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
+ // result: (SETBE x)
for {
- t := v.Type
- x := v.Args[0]
- v.reset(OpAMD64ADDQconst)
- v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
- v1 := b.NewValue0(v.Pos, OpSelect0, t)
- v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
- v2.AddArg(x)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
- v3.AuxInt = -1
- v0.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
- v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
- v5.AddArg(x)
- v4.AddArg(v5)
- v0.AddArg(v4)
- v.AddArg(v0)
+ if v.AuxInt != 1 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SETA {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETBE)
+ v.AddArg(x)
return true
}
+ return false
}
-func rewriteValueAMD64_OpBswap32_0(v *Value) bool {
- // match: (Bswap32 x)
+func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool {
+ // match: (XORLconst [c] (XORLconst [d] x))
// cond:
- // result: (BSWAPL x)
+ // result: (XORLconst [c ^ d] x)
for {
- x := v.Args[0]
- v.reset(OpAMD64BSWAPL)
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c ^ d
v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpBswap64_0(v *Value) bool {
- // match: (Bswap64 x)
- // cond:
- // result: (BSWAPQ x)
+ // match: (XORLconst [c] x)
+ // cond: int32(c)==0
+ // result: x
for {
+ c := v.AuxInt
x := v.Args[0]
- v.reset(OpAMD64BSWAPQ)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpCeil_0(v *Value) bool {
- // match: (Ceil x)
+ // match: (XORLconst [c] (MOVLconst [d]))
// cond:
- // result: (ROUNDSD [2] x)
+ // result: (MOVLconst [c^d])
for {
- x := v.Args[0]
- v.reset(OpAMD64ROUNDSD)
- v.AuxInt = 2
- v.AddArg(x)
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = c ^ d
return true
}
+ return false
}
-func rewriteValueAMD64_OpClosureCall_0(v *Value) bool {
- // match: (ClosureCall [argwid] entry closure mem)
- // cond:
- // result: (CALLclosure [argwid] entry closure mem)
+func rewriteValueAMD64_OpAMD64XORLmem_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (XORLmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (XORLmem [off1+off2] {sym} val base mem)
for {
- argwid := v.AuxInt
+ off1 := v.AuxInt
+ sym := v.Aux
_ = v.Args[2]
- entry := v.Args[0]
- closure := v.Args[1]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
mem := v.Args[2]
- v.reset(OpAMD64CALLclosure)
- v.AuxInt = argwid
- v.AddArg(entry)
- v.AddArg(closure)
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64XORLmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
v.AddArg(mem)
return true
}
-}
-func rewriteValueAMD64_OpCom16_0(v *Value) bool {
- // match: (Com16 x)
+ // match: (XORLmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (XORLmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORLmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// cond:
- // result: (NOTL x)
+ // result: (XORL x (MOVLf2i y))
for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
x := v.Args[0]
- v.reset(OpAMD64NOTL)
+ ptr := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpAMD64MOVSSstore {
+ break
+ }
+ if v_2.AuxInt != off {
+ break
+ }
+ if v_2.Aux != sym {
+ break
+ }
+ _ = v_2.Args[2]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ y := v_2.Args[1]
+ v.reset(OpAMD64XORL)
v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
+ return false
}
-func rewriteValueAMD64_OpCom32_0(v *Value) bool {
- // match: (Com32 x)
- // cond:
- // result: (NOTL x)
+func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool {
+ // match: (XORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (XORQconst [c] x)
for {
+ _ = v.Args[1]
x := v.Args[0]
- v.reset(OpAMD64NOTL)
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c
v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpCom64_0(v *Value) bool {
- // match: (Com64 x)
- // cond:
- // result: (NOTQ x)
+ // match: (XORQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (XORQconst [c] x)
for {
- x := v.Args[0]
- v.reset(OpAMD64NOTQ)
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c
v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpCom8_0(v *Value) bool {
- // match: (Com8 x)
- // cond:
- // result: (NOTL x)
+ // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
+ // cond: d==64-c
+ // result: (ROLQconst x [c])
for {
- x := v.Args[0]
- v.reset(OpAMD64NOTL)
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ d := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(d == 64-c) {
+ break
+ }
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = c
v.AddArg(x)
return true
}
-}
-func rewriteValueAMD64_OpCondSelect_0(v *Value) bool {
- // match: (CondSelect <t> x y (SETEQ cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQEQ y x cond)
+ // match: (XORQ (SHRQconst x [d]) (SHLQconst x [c]))
+ // cond: d==64-c
+ // result: (ROLQconst x [c])
for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETEQ {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHRQconst {
break
}
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLQconst {
break
}
- v.reset(OpAMD64CMOVQEQ)
- v.AddArg(y)
+ c := v_1.AuxInt
+ if x != v_1.Args[0] {
+ break
+ }
+ if !(d == 64-c) {
+ break
+ }
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = c
v.AddArg(x)
- v.AddArg(cond)
return true
}
- // match: (CondSelect <t> x y (SETNE cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQNE y x cond)
+ // match: (XORQ x x)
+ // cond:
+ // result: (MOVQconst [0])
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ if x != v.Args[1] {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = 0
+ return true
+ }
+ // match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (XORQmem x [off] {sym} ptr mem)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETNE {
+ l := v.Args[1]
+ if l.Op != OpAMD64MOVQload {
break
}
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64CMOVQNE)
- v.AddArg(y)
+ v.reset(OpAMD64XORQmem)
+ v.AuxInt = off
+ v.Aux = sym
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (CondSelect <t> x y (SETL cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQLT y x cond)
+ // match: (XORQ l:(MOVQload [off] {sym} ptr mem) x)
+ // cond: canMergeLoad(v, l, x) && clobber(l)
+ // result: (XORQmem x [off] {sym} ptr mem)
for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETL {
+ _ = v.Args[1]
+ l := v.Args[0]
+ if l.Op != OpAMD64MOVQload {
break
}
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
+ off := l.AuxInt
+ sym := l.Aux
+ _ = l.Args[1]
+ ptr := l.Args[0]
+ mem := l.Args[1]
+ x := v.Args[1]
+ if !(canMergeLoad(v, l, x) && clobber(l)) {
break
}
- v.reset(OpAMD64CMOVQLT)
- v.AddArg(y)
+ v.reset(OpAMD64XORQmem)
+ v.AuxInt = off
+ v.Aux = sym
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (CondSelect <t> x y (SETG cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQGT y x cond)
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool {
+ // match: (XORQconst [c] (XORQconst [d] x))
+ // cond:
+ // result: (XORQconst [c ^ d] x)
for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETG {
- break
- }
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORQconst {
break
}
- v.reset(OpAMD64CMOVQGT)
- v.AddArg(y)
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c ^ d
v.AddArg(x)
- v.AddArg(cond)
return true
}
- // match: (CondSelect <t> x y (SETLE cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQLE y x cond)
+ // match: (XORQconst [0] x)
+ // cond:
+ // result: x
for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETLE {
- break
- }
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
+ if v.AuxInt != 0 {
break
}
- v.reset(OpAMD64CMOVQLE)
- v.AddArg(y)
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
v.AddArg(x)
- v.AddArg(cond)
return true
}
- // match: (CondSelect <t> x y (SETGE cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQGE y x cond)
+ // match: (XORQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [c^d])
for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETGE {
- break
- }
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
break
}
- v.reset(OpAMD64CMOVQGE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = c ^ d
return true
}
- // match: (CondSelect <t> x y (SETA cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQHI y x cond)
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQmem_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (XORQmem [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (XORQmem [off1+off2] {sym} val base mem)
for {
- t := v.Type
+ off1 := v.AuxInt
+ sym := v.Aux
_ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETA {
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ADDQconst {
break
}
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
+ off2 := v_1.AuxInt
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
break
}
- v.reset(OpAMD64CMOVQHI)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.reset(OpAMD64XORQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
return true
}
- // match: (CondSelect <t> x y (SETB cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQCS y x cond)
+ // match: (XORQmem [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (XORQmem [off1+off2] {mergeSym(sym1,sym2)} val base mem)
for {
- t := v.Type
+ off1 := v.AuxInt
+ sym1 := v.Aux
_ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETB {
+ val := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64LEAQ {
break
}
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
+ off2 := v_1.AuxInt
+ sym2 := v_1.Aux
+ base := v_1.Args[0]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
break
}
- v.reset(OpAMD64CMOVQCS)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.reset(OpAMD64XORQmem)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(val)
+ v.AddArg(base)
+ v.AddArg(mem)
return true
}
- // match: (CondSelect <t> x y (SETAE cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQCC y x cond)
+ // match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // cond:
+ // result: (XORQ x (MOVQf2i y))
for {
- t := v.Type
+ off := v.AuxInt
+ sym := v.Aux
_ = v.Args[2]
x := v.Args[0]
- y := v.Args[1]
+ ptr := v.Args[1]
v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETAE {
+ if v_2.Op != OpAMD64MOVSDstore {
break
}
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
+ if v_2.AuxInt != off {
break
}
- v.reset(OpAMD64CMOVQCC)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
- return true
- }
- // match: (CondSelect <t> x y (SETBE cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQLS y x cond)
- for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETBE {
+ if v_2.Aux != sym {
break
}
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
+ _ = v_2.Args[2]
+ if ptr != v_2.Args[0] {
break
}
- v.reset(OpAMD64CMOVQLS)
- v.AddArg(y)
+ y := v_2.Args[1]
+ v.reset(OpAMD64XORQ)
v.AddArg(x)
- v.AddArg(cond)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg(v0)
return true
}
return false
}
-func rewriteValueAMD64_OpCondSelect_10(v *Value) bool {
- // match: (CondSelect <t> x y (SETEQF cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQEQF y x cond)
+func rewriteValueAMD64_OpAdd16_0(v *Value) bool {
+ // match: (Add16 x y)
+ // cond:
+ // result: (ADDL x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETEQF {
- break
- }
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
- break
- }
- v.reset(OpAMD64CMOVQEQF)
- v.AddArg(y)
+ v.reset(OpAMD64ADDL)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETNEF cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQNEF y x cond)
+}
+func rewriteValueAMD64_OpAdd32_0(v *Value) bool {
+ // match: (Add32 x y)
+ // cond:
+ // result: (ADDL x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETNEF {
- break
- }
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
- break
- }
- v.reset(OpAMD64CMOVQNEF)
- v.AddArg(y)
+ v.reset(OpAMD64ADDL)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETGF cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQGTF y x cond)
+}
+func rewriteValueAMD64_OpAdd32F_0(v *Value) bool {
+ // match: (Add32F x y)
+ // cond:
+ // result: (ADDSS x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETGF {
- break
- }
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
- break
- }
- v.reset(OpAMD64CMOVQGTF)
- v.AddArg(y)
+ v.reset(OpAMD64ADDSS)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETGEF cond))
- // cond: (is64BitInt(t) || isPtr(t))
- // result: (CMOVQGEF y x cond)
+}
+func rewriteValueAMD64_OpAdd64_0(v *Value) bool {
+ // match: (Add64 x y)
+ // cond:
+ // result: (ADDQ x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETGEF {
- break
- }
- cond := v_2.Args[0]
- if !(is64BitInt(t) || isPtr(t)) {
- break
- }
- v.reset(OpAMD64CMOVQGEF)
- v.AddArg(y)
+ v.reset(OpAMD64ADDQ)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETEQ cond))
- // cond: is32BitInt(t)
- // result: (CMOVLEQ y x cond)
+}
+func rewriteValueAMD64_OpAdd64F_0(v *Value) bool {
+ // match: (Add64F x y)
+ // cond:
+ // result: (ADDSD x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETEQ {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVLEQ)
- v.AddArg(y)
+ v.reset(OpAMD64ADDSD)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETNE cond))
- // cond: is32BitInt(t)
- // result: (CMOVLNE y x cond)
+}
+func rewriteValueAMD64_OpAdd8_0(v *Value) bool {
+ // match: (Add8 x y)
+ // cond:
+ // result: (ADDL x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETNE {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVLNE)
- v.AddArg(y)
+ v.reset(OpAMD64ADDL)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETL cond))
- // cond: is32BitInt(t)
- // result: (CMOVLLT y x cond)
+}
+func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (AddPtr x y)
+ // cond: config.PtrSize == 8
+ // result: (ADDQ x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETL {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
+ if !(config.PtrSize == 8) {
break
}
- v.reset(OpAMD64CMOVLLT)
- v.AddArg(y)
+ v.reset(OpAMD64ADDQ)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETG cond))
- // cond: is32BitInt(t)
- // result: (CMOVLGT y x cond)
+ // match: (AddPtr x y)
+ // cond: config.PtrSize == 4
+ // result: (ADDL x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETG {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
+ if !(config.PtrSize == 4) {
break
}
- v.reset(OpAMD64CMOVLGT)
- v.AddArg(y)
+ v.reset(OpAMD64ADDL)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETLE cond))
- // cond: is32BitInt(t)
- // result: (CMOVLLE y x cond)
+ return false
+}
+func rewriteValueAMD64_OpAddr_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (Addr {sym} base)
+ // cond: config.PtrSize == 8
+ // result: (LEAQ {sym} base)
for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETLE {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
+ sym := v.Aux
+ base := v.Args[0]
+ if !(config.PtrSize == 8) {
break
}
- v.reset(OpAMD64CMOVLLE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.reset(OpAMD64LEAQ)
+ v.Aux = sym
+ v.AddArg(base)
return true
}
- // match: (CondSelect <t> x y (SETGE cond))
- // cond: is32BitInt(t)
- // result: (CMOVLGE y x cond)
+ // match: (Addr {sym} base)
+ // cond: config.PtrSize == 4
+ // result: (LEAL {sym} base)
for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETGE {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
+ sym := v.Aux
+ base := v.Args[0]
+ if !(config.PtrSize == 4) {
break
}
- v.reset(OpAMD64CMOVLGE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.reset(OpAMD64LEAL)
+ v.Aux = sym
+ v.AddArg(base)
return true
}
return false
}
-func rewriteValueAMD64_OpCondSelect_20(v *Value) bool {
- // match: (CondSelect <t> x y (SETA cond))
- // cond: is32BitInt(t)
- // result: (CMOVLHI y x cond)
+func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
+ // match: (And16 x y)
+ // cond:
+ // result: (ANDL x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETA {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVLHI)
- v.AddArg(y)
+ v.reset(OpAMD64ANDL)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETB cond))
- // cond: is32BitInt(t)
- // result: (CMOVLCS y x cond)
+}
+func rewriteValueAMD64_OpAnd32_0(v *Value) bool {
+ // match: (And32 x y)
+ // cond:
+ // result: (ANDL x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETB {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVLCS)
- v.AddArg(y)
+ v.reset(OpAMD64ANDL)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETAE cond))
- // cond: is32BitInt(t)
- // result: (CMOVLCC y x cond)
+}
+func rewriteValueAMD64_OpAnd64_0(v *Value) bool {
+ // match: (And64 x y)
+ // cond:
+ // result: (ANDQ x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETAE {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVLCC)
- v.AddArg(y)
+ v.reset(OpAMD64ANDQ)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETBE cond))
- // cond: is32BitInt(t)
- // result: (CMOVLLS y x cond)
+}
+func rewriteValueAMD64_OpAnd8_0(v *Value) bool {
+ // match: (And8 x y)
+ // cond:
+ // result: (ANDL x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETBE {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVLLS)
- v.AddArg(y)
+ v.reset(OpAMD64ANDL)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETEQF cond))
- // cond: is32BitInt(t)
- // result: (CMOVLEQF y x cond)
+}
+func rewriteValueAMD64_OpAndB_0(v *Value) bool {
+ // match: (AndB x y)
+ // cond:
+ // result: (ANDL x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETEQF {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVLEQF)
- v.AddArg(y)
+ v.reset(OpAMD64ANDL)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETNEF cond))
- // cond: is32BitInt(t)
- // result: (CMOVLNEF y x cond)
+}
+func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (AtomicAdd32 ptr val mem)
+ // cond:
+ // result: (AddTupleFirst32 val (XADDLlock val ptr mem))
for {
- t := v.Type
_ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETNEF {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVLNEF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64AddTupleFirst32)
+ v.AddArg(val)
+ v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg(val)
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (AtomicAdd64 ptr val mem)
+ // cond:
+ // result: (AddTupleFirst64 val (XADDQlock val ptr mem))
+ for {
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64AddTupleFirst64)
+ v.AddArg(val)
+ v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
+ v0.AddArg(val)
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool {
+ // match: (AtomicAnd8 ptr val mem)
+ // cond:
+ // result: (ANDBlock ptr val mem)
+ for {
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64ANDBlock)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool {
+ // match: (AtomicCompareAndSwap32 ptr old new_ mem)
+ // cond:
+ // result: (CMPXCHGLlock ptr old new_ mem)
+ for {
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ old := v.Args[1]
+ new_ := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpAMD64CMPXCHGLlock)
+ v.AddArg(ptr)
+ v.AddArg(old)
+ v.AddArg(new_)
+ v.AddArg(mem)
return true
}
- // match: (CondSelect <t> x y (SETGF cond))
- // cond: is32BitInt(t)
- // result: (CMOVLGTF y x cond)
+}
+func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool {
+ // match: (AtomicCompareAndSwap64 ptr old new_ mem)
+ // cond:
+ // result: (CMPXCHGQlock ptr old new_ mem)
for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETGF {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVLGTF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ old := v.Args[1]
+ new_ := v.Args[2]
+ mem := v.Args[3]
+ v.reset(OpAMD64CMPXCHGQlock)
+ v.AddArg(ptr)
+ v.AddArg(old)
+ v.AddArg(new_)
+ v.AddArg(mem)
return true
}
- // match: (CondSelect <t> x y (SETGEF cond))
- // cond: is32BitInt(t)
- // result: (CMOVLGEF y x cond)
+}
+func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool {
+ // match: (AtomicExchange32 ptr val mem)
+ // cond:
+ // result: (XCHGL val ptr mem)
for {
- t := v.Type
_ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETGEF {
- break
- }
- cond := v_2.Args[0]
- if !(is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVLGEF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64XCHGL)
+ v.AddArg(val)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (CondSelect <t> x y (SETEQ cond))
- // cond: is16BitInt(t)
- // result: (CMOVWEQ y x cond)
+}
+func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool {
+ // match: (AtomicExchange64 ptr val mem)
+ // cond:
+ // result: (XCHGQ val ptr mem)
for {
- t := v.Type
_ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETEQ {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVWEQ)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64XCHGQ)
+ v.AddArg(val)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (CondSelect <t> x y (SETNE cond))
- // cond: is16BitInt(t)
- // result: (CMOVWNE y x cond)
+}
+func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool {
+ // match: (AtomicLoad32 ptr mem)
+ // cond:
+ // result: (MOVLatomicload ptr mem)
for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETNE {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVWNE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64MOVLatomicload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- return false
}
-func rewriteValueAMD64_OpCondSelect_30(v *Value) bool {
- // match: (CondSelect <t> x y (SETL cond))
- // cond: is16BitInt(t)
- // result: (CMOVWLT y x cond)
+func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool {
+ // match: (AtomicLoad64 ptr mem)
+ // cond:
+ // result: (MOVQatomicload ptr mem)
for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETL {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVWLT)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ v.reset(OpAMD64MOVQatomicload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (CondSelect <t> x y (SETG cond))
- // cond: is16BitInt(t)
- // result: (CMOVWGT y x cond)
+}
+func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (AtomicLoadPtr ptr mem)
+ // cond: config.PtrSize == 8
+ // result: (MOVQatomicload ptr mem)
for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETG {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(config.PtrSize == 8) {
break
}
- v.reset(OpAMD64CMOVWGT)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.reset(OpAMD64MOVQatomicload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (CondSelect <t> x y (SETLE cond))
- // cond: is16BitInt(t)
- // result: (CMOVWLE y x cond)
+ // match: (AtomicLoadPtr ptr mem)
+ // cond: config.PtrSize == 4
+ // result: (MOVLatomicload ptr mem)
for {
- t := v.Type
- _ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETLE {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ mem := v.Args[1]
+ if !(config.PtrSize == 4) {
break
}
- v.reset(OpAMD64CMOVWLE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.reset(OpAMD64MOVLatomicload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
return true
}
- // match: (CondSelect <t> x y (SETGE cond))
- // cond: is16BitInt(t)
- // result: (CMOVWGE y x cond)
+ return false
+}
+func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
+ // match: (AtomicOr8 ptr val mem)
+ // cond:
+ // result: (ORBlock ptr val mem)
for {
- t := v.Type
_ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETGE {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVWGE)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64ORBlock)
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
return true
}
- // match: (CondSelect <t> x y (SETA cond))
- // cond: is16BitInt(t)
- // result: (CMOVWHI y x cond)
+}
+func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (AtomicStore32 ptr val mem)
+ // cond:
+ // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
for {
- t := v.Type
_ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETA {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVWHI)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg(val)
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
return true
}
- // match: (CondSelect <t> x y (SETB cond))
- // cond: is16BitInt(t)
- // result: (CMOVWCS y x cond)
+}
+func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (AtomicStore64 ptr val mem)
+ // cond:
+ // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
for {
- t := v.Type
_ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETB {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVWCS)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
+ v0.AddArg(val)
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
return true
}
- // match: (CondSelect <t> x y (SETAE cond))
- // cond: is16BitInt(t)
- // result: (CMOVWCC y x cond)
+}
+func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (AtomicStorePtrNoWB ptr val mem)
+ // cond: config.PtrSize == 8
+ // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
for {
- t := v.Type
_ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETAE {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(config.PtrSize == 8) {
break
}
- v.reset(OpAMD64CMOVWCC)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
+ v0.AddArg(val)
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
return true
}
- // match: (CondSelect <t> x y (SETBE cond))
- // cond: is16BitInt(t)
- // result: (CMOVWLS y x cond)
+ // match: (AtomicStorePtrNoWB ptr val mem)
+ // cond: config.PtrSize == 4
+ // result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
for {
- t := v.Type
_ = v.Args[2]
- x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETBE {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(config.PtrSize == 4) {
break
}
- v.reset(OpAMD64CMOVWLS)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
+ v0.AddArg(val)
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
return true
}
- // match: (CondSelect <t> x y (SETEQF cond))
- // cond: is16BitInt(t)
- // result: (CMOVWEQF y x cond)
+ return false
+}
+func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
+ // match: (Avg64u x y)
+ // cond:
+ // result: (AVGQU x y)
for {
- t := v.Type
- _ = v.Args[2]
+ _ = v.Args[1]
x := v.Args[0]
y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETEQF {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVWEQF)
- v.AddArg(y)
+ v.reset(OpAMD64AVGQU)
v.AddArg(x)
- v.AddArg(cond)
+ v.AddArg(y)
return true
}
- // match: (CondSelect <t> x y (SETNEF cond))
- // cond: is16BitInt(t)
- // result: (CMOVWNEF y x cond)
+}
+func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (BitLen32 x)
+ // cond:
+ // result: (BitLen64 (MOVLQZX <typ.UInt64> x))
for {
- t := v.Type
- _ = v.Args[2]
x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETNEF {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVWNEF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.reset(OpBitLen64)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
return true
}
- return false
}
-func rewriteValueAMD64_OpCondSelect_40(v *Value) bool {
+func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
b := v.Block
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (CondSelect <t> x y (SETGF cond))
- // cond: is16BitInt(t)
- // result: (CMOVWGTF y x cond)
+ // match: (BitLen64 <t> x)
+ // cond:
+ // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
for {
t := v.Type
- _ = v.Args[2]
x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETGF {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVWGTF)
- v.AddArg(y)
- v.AddArg(x)
- v.AddArg(cond)
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = 1
+ v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
+ v1 := b.NewValue0(v.Pos, OpSelect0, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
+ v3.AuxInt = -1
+ v0.AddArg(v3)
+ v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v0.AddArg(v4)
+ v.AddArg(v0)
return true
}
- // match: (CondSelect <t> x y (SETGEF cond))
- // cond: is16BitInt(t)
- // result: (CMOVWGEF y x cond)
+}
+func rewriteValueAMD64_OpBswap32_0(v *Value) bool {
+ // match: (Bswap32 x)
+ // cond:
+ // result: (BSWAPL x)
for {
- t := v.Type
- _ = v.Args[2]
x := v.Args[0]
- y := v.Args[1]
- v_2 := v.Args[2]
- if v_2.Op != OpAMD64SETGEF {
- break
- }
- cond := v_2.Args[0]
- if !(is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVWGEF)
- v.AddArg(y)
+ v.reset(OpAMD64BSWAPL)
v.AddArg(x)
- v.AddArg(cond)
return true
}
- // match: (CondSelect <t> x y check)
- // cond: !check.Type.IsFlags() && check.Type.Size() == 1
- // result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
+}
+func rewriteValueAMD64_OpBswap64_0(v *Value) bool {
+ // match: (Bswap64 x)
+ // cond:
+ // result: (BSWAPQ x)
for {
- t := v.Type
- _ = v.Args[2]
x := v.Args[0]
- y := v.Args[1]
- check := v.Args[2]
- if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
- break
- }
- v.reset(OpCondSelect)
- v.Type = t
+ v.reset(OpAMD64BSWAPQ)
v.AddArg(x)
- v.AddArg(y)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
- v0.AddArg(check)
- v.AddArg(v0)
return true
}
- // match: (CondSelect <t> x y check)
- // cond: !check.Type.IsFlags() && check.Type.Size() == 2
- // result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
+}
+func rewriteValueAMD64_OpCeil_0(v *Value) bool {
+ // match: (Ceil x)
+ // cond:
+ // result: (ROUNDSD [2] x)
for {
- t := v.Type
- _ = v.Args[2]
x := v.Args[0]
- y := v.Args[1]
- check := v.Args[2]
- if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
- break
- }
- v.reset(OpCondSelect)
- v.Type = t
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = 2
v.AddArg(x)
- v.AddArg(y)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
- v0.AddArg(check)
- v.AddArg(v0)
return true
}
- // match: (CondSelect <t> x y check)
- // cond: !check.Type.IsFlags() && check.Type.Size() == 4
- // result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
+}
+func rewriteValueAMD64_OpClosureCall_0(v *Value) bool {
+ // match: (ClosureCall [argwid] entry closure mem)
+ // cond:
+ // result: (CALLclosure [argwid] entry closure mem)
for {
- t := v.Type
+ argwid := v.AuxInt
_ = v.Args[2]
+ entry := v.Args[0]
+ closure := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64CALLclosure)
+ v.AuxInt = argwid
+ v.AddArg(entry)
+ v.AddArg(closure)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCom16_0(v *Value) bool {
+ // match: (Com16 x)
+ // cond:
+ // result: (NOTL x)
+ for {
x := v.Args[0]
- y := v.Args[1]
- check := v.Args[2]
- if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
- break
- }
- v.reset(OpCondSelect)
- v.Type = t
+ v.reset(OpAMD64NOTL)
v.AddArg(x)
- v.AddArg(y)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
- v0.AddArg(check)
- v.AddArg(v0)
return true
}
- // match: (CondSelect <t> x y check)
- // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
- // result: (CMOVQNE y x (CMPQconst [0] check))
+}
+func rewriteValueAMD64_OpCom32_0(v *Value) bool {
+ // match: (Com32 x)
+ // cond:
+ // result: (NOTL x)
for {
- t := v.Type
- _ = v.Args[2]
x := v.Args[0]
- y := v.Args[1]
- check := v.Args[2]
- if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
- break
- }
- v.reset(OpAMD64CMOVQNE)
- v.AddArg(y)
+ v.reset(OpAMD64NOTL)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v0.AuxInt = 0
- v0.AddArg(check)
- v.AddArg(v0)
return true
}
- // match: (CondSelect <t> x y check)
- // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
- // result: (CMOVLNE y x (CMPQconst [0] check))
+}
+func rewriteValueAMD64_OpCom64_0(v *Value) bool {
+ // match: (Com64 x)
+ // cond:
+ // result: (NOTQ x)
for {
- t := v.Type
- _ = v.Args[2]
x := v.Args[0]
- y := v.Args[1]
- check := v.Args[2]
- if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVLNE)
- v.AddArg(y)
+ v.reset(OpAMD64NOTQ)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v0.AuxInt = 0
- v0.AddArg(check)
- v.AddArg(v0)
return true
}
- // match: (CondSelect <t> x y check)
- // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
- // result: (CMOVWNE y x (CMPQconst [0] check))
+}
+func rewriteValueAMD64_OpCom8_0(v *Value) bool {
+ // match: (Com8 x)
+ // cond:
+ // result: (NOTL x)
for {
- t := v.Type
- _ = v.Args[2]
x := v.Args[0]
- y := v.Args[1]
- check := v.Args[2]
- if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
- break
- }
- v.reset(OpAMD64CMOVWNE)
- v.AddArg(y)
+ v.reset(OpAMD64NOTL)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v0.AuxInt = 0
- v0.AddArg(check)
- v.AddArg(v0)
return true
}
- return false
}
func rewriteValueAMD64_OpConst16_0(v *Value) bool {
// match: (Const16 [val])