(ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x)
(ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x)
+(XORLconst [c] (XORLconst [d] x)) -> (XORLconst [c ^ d] x)
+(XORQconst [c] (XORQconst [d] x)) -> (XORQconst [c ^ d] x)
+
+(MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
+(MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x)
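+// A sketch of the multiply folding above (illustration only): MULLconst is a
+// 32-bit multiply, so the folded constant is truncated back to 32 bits,
+// matching what two back-to-back multiplies would compute, e.g.
+//   c, d := int64(1<<20), int64(1<<15)
+//   int64(int32(c*d)) == 0       // the 32-bit product wraps to 0
+// MULQconst keeps the full 64-bit product, but its immediate is only 32 bits
+// wide, so that fold is guarded by is32Bit(c*d).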
+
(ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
(ORQ (MOVQconst [c]) x) && is32Bit(c) -> (ORQconst [c] x)
(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
(SHRL x (ANDLconst [31] y)) -> (SHRL x y)
(SHRQ x (ANDQconst [63] y)) -> (SHRQ x y)
+(ROLQconst [c] (ROLQconst [d] x)) -> (ROLQconst [(c+d)&63] x)
+(ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
+(ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
+(ROLBconst [c] (ROLBconst [d] x)) -> (ROLBconst [(c+d)& 7] x)
+
+(ROLQconst [0] x) -> x
+(ROLLconst [0] x) -> x
+(ROLWconst [0] x) -> x
+(ROLBconst [0] x) -> x
+
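+// A note on the rotate folds above (illustration only, writing rolN for an
+// N-bit rotate left): rotating an N-bit value by N is the identity, so two
+// constant rotates combine by adding their counts modulo the width, e.g.
+//   rol64(rol64(x, d), c) == rol64(x, (c+d)&63)
+// and a rotate by 0 is dropped entirely. Rotates differ from the shifts
+// discussed below: a 16-bit rotate by 17 equals a rotate by 1, but a 16-bit
+// shift by 17 is not a shift by 1.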
// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
return rewriteValueAMD64_OpOr8(v, config)
case OpOrB:
return rewriteValueAMD64_OpOrB(v, config)
+ case OpAMD64ROLBconst:
+ return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
+ case OpAMD64ROLLconst:
+ return rewriteValueAMD64_OpAMD64ROLLconst(v, config)
+ case OpAMD64ROLQconst:
+ return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
+ case OpAMD64ROLWconst:
+ return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
case OpRsh16Ux16:
return rewriteValueAMD64_OpRsh16Ux16(v, config)
case OpRsh16Ux32:
func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (MULLconst [c] (MULLconst [d] x))
+ // cond:
+ // result: (MULLconst [int64(int32(c * d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MULLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64MULLconst)
+ v.AuxInt = int64(int32(c * d))
+ v.AddArg(x)
+ return true
+ }
// match: (MULLconst [c] (MOVLconst [d]))
// cond:
// result: (MOVLconst [int64(int32(c*d))])
func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
b := v.Block
_ = b
+	// match: (MULQconst [c] (MULQconst [d] x))
+	// cond: is32Bit(c*d)
+	// result: (MULQconst [c * d] x)
+	for {
+		c := v.AuxInt
+		v_0 := v.Args[0]
+		if v_0.Op != OpAMD64MULQconst {
+			break
+		}
+		d := v_0.AuxInt
+		x := v_0.Args[0]
+		if !(is32Bit(c*d)) {
+			break
+		}
+		v.reset(OpAMD64MULQconst)
+		v.AuxInt = c * d
+		v.AddArg(x)
+		return true
+	}
// match: (MULQconst [-1] x)
// cond:
// result: (NEGQ x)
return true
}
}
+func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ROLBconst [c] (ROLBconst [d] x))
+ // cond:
+ // result: (ROLBconst [(c+d)& 7] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ROLBconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = (c + d) & 7
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLBconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ROLLconst [c] (ROLLconst [d] x))
+ // cond:
+ // result: (ROLLconst [(c+d)&31] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ROLLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = (c + d) & 31
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLLconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ROLQconst [c] (ROLQconst [d] x))
+ // cond:
+ // result: (ROLQconst [(c+d)&63] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ROLQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = (c + d) & 63
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLQconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (ROLWconst [c] (ROLWconst [d] x))
+ // cond:
+ // result: (ROLWconst [(c+d)&15] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ROLWconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = (c + d) & 15
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLWconst [0] x)
+ // cond:
+ // result: x
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ x := v.Args[0]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
b := v.Block
_ = b
func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // cond:
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
// match: (XORLconst [c] x)
// cond: int32(c)==0
// result: x
func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (XORQconst [c] (XORQconst [d] x))
+ // cond:
+ // result: (XORQconst [c ^ d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c ^ d
+ v.AddArg(x)
+ return true
+ }
// match: (XORQconst [0] x)
// cond:
// result: x
v.AuxInt = int64(int16(c * d))
return true
}
+ // match: (Mul16 (Const16 [-1]) x)
+ // cond:
+ // result: (Neg16 x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst16 {
+ break
+ }
+ if v_0.AuxInt != -1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpNeg16)
+ v.AddArg(x)
+ return true
+ }
// match: (Mul16 x (Const16 <t> [c]))
// cond: x.Op != OpConst16
// result: (Mul16 (Const16 <t> [c]) x)
v.AuxInt = int64(int32(c * d))
return true
}
+ // match: (Mul32 (Const32 [-1]) x)
+ // cond:
+ // result: (Neg32 x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst32 {
+ break
+ }
+ if v_0.AuxInt != -1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpNeg32)
+ v.AddArg(x)
+ return true
+ }
// match: (Mul32 x (Const32 <t> [c]))
// cond: x.Op != OpConst32
// result: (Mul32 (Const32 <t> [c]) x)
v.AuxInt = c * d
return true
}
+ // match: (Mul64 (Const64 [-1]) x)
+ // cond:
+ // result: (Neg64 x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst64 {
+ break
+ }
+ if v_0.AuxInt != -1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpNeg64)
+ v.AddArg(x)
+ return true
+ }
// match: (Mul64 x (Const64 <t> [c]))
// cond: x.Op != OpConst64
// result: (Mul64 (Const64 <t> [c]) x)
v.AuxInt = int64(int8(c * d))
return true
}
+ // match: (Mul8 (Const8 [-1]) x)
+ // cond:
+ // result: (Neg8 x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst8 {
+ break
+ }
+ if v_0.AuxInt != -1 {
+ break
+ }
+ x := v.Args[1]
+ v.reset(OpNeg8)
+ v.AddArg(x)
+ return true
+ }
// match: (Mul8 x (Const8 <t> [c]))
// cond: x.Op != OpConst8
// result: (Mul8 (Const8 <t> [c]) x)