(SARB x (MOVWconst [c])) -> (SARBconst [c&31] x)
(SARB x (MOVBconst [c])) -> (SARBconst [c&31] x)
+(SARB x (ANDBconst [31] y)) -> (SARB x y)
+(SARW x (ANDWconst [31] y)) -> (SARW x y)
+(SARL x (ANDLconst [31] y)) -> (SARL x y)
+(SARQ x (ANDQconst [63] y)) -> (SARQ x y)
+
+(SHLB x (ANDBconst [31] y)) -> (SHLB x y)
+(SHLW x (ANDWconst [31] y)) -> (SHLW x y)
+(SHLL x (ANDLconst [31] y)) -> (SHLL x y)
+(SHLQ x (ANDQconst [63] y)) -> (SHLQ x y)
+
+(SHRB x (ANDBconst [31] y)) -> (SHRB x y)
+(SHRW x (ANDWconst [31] y)) -> (SHRW x y)
+(SHRL x (ANDLconst [31] y)) -> (SHRL x y)
+(SHRQ x (ANDQconst [63] y)) -> (SHRQ x y)
+
// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
// (SHLW x (MOVWconst [24])), but just in case.
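Context for the new rules above (illustration, not part of the diff): they fire when Go source masks a variable shift count with the same mask the hardware applies, so the AND contributes nothing once the shift is lowered. A minimal sketch with a hypothetical function name, assuming the usual lowering of an unsigned 64-bit shift:

```go
package main

import "fmt"

// shr is a hypothetical example. The &63 keeps the count in range at the
// Go level, and the x86 SHRQ instruction already reduces its count modulo
// 64, so the new (SHRQ x (ANDQconst [63] y)) -> (SHRQ x y) rule can drop
// the leftover AND when this lowers.
func shr(x uint64, s uint) uint64 {
	return x >> (s & 63)
}

func main() {
	fmt.Println(shr(1<<40, 70)) // 70&63 == 6, so this prints 1<<34
}
```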
v.AddArg(x)
return true
}
+ // match: (SARB x (ANDBconst [31] y))
+ // cond:
+ // result: (SARB x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDBconst {
+ break
+ }
+ if v_1.AuxInt != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SARB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
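The generated matchers that follow (this one for SARB and the analogous ones for the other widths and shift directions) all share the same shape: check that the second argument is the right AND-with-constant, check the mask value, then reset the value to the plain shift. A standalone toy sketch of that match-and-rewrite pattern, using hypothetical stand-in types rather than the compiler's real ssa.Value:

```go
package main

import "fmt"

// Value is a toy stand-in; the real type lives in cmd/compile/internal/ssa.
type Value struct {
	Op     string
	AuxInt int64
	Args   []*Value
}

// rewriteSARQ mirrors the generated rule
// (SARQ x (ANDQconst [63] y)) -> (SARQ x y):
// match the op and mask, then drop the redundant AND.
func rewriteSARQ(v *Value) bool {
	if v.Op != "SARQ" {
		return false
	}
	x, v1 := v.Args[0], v.Args[1]
	if v1.Op != "ANDQconst" || v1.AuxInt != 63 {
		return false
	}
	y := v1.Args[0]
	v.Args = []*Value{x, y}
	return true
}

func main() {
	y := &Value{Op: "Arg"}
	x := &Value{Op: "Arg"}
	mask := &Value{Op: "ANDQconst", AuxInt: 63, Args: []*Value{y}}
	sar := &Value{Op: "SARQ", Args: []*Value{x, mask}}
	fmt.Println(rewriteSARQ(sar), sar.Args[1] == y) // true true
}
```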
func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (SARL x (ANDLconst [31] y))
+ // cond:
+ // result: (SARL x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ if v_1.AuxInt != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SARL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (SARQ x (ANDQconst [63] y))
+ // cond:
+ // result: (SARQ x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ if v_1.AuxInt != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SARQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
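A side note on the two mask widths (illustration, not part of the diff): the Q rules match [63] while the B/W/L rules match [31] because 64-bit x86 shifts consume the low 6 bits of the count and the narrower shifts consume the low 5, so only the matching mask is a true no-op at the machine level. A quick arithmetic sketch:

```go
package main

import "fmt"

func main() {
	// For a count already below 32, the 5-bit and 6-bit masks agree.
	s := uint(6)
	fmt.Println(s&63, s&31) // 6 6

	// For a 64-bit shift count of 40, only the 6-bit mask leaves it alone;
	// a [31] mask would change it, so it cannot be dropped from a SARQ/SHRQ.
	s = 40
	fmt.Println(s&63, s&31) // 40 8
}
```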
+ // match: (SARW x (ANDWconst [31] y))
+ // cond:
+ // result: (SARW x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDWconst {
+ break
+ }
+ if v_1.AuxInt != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SARW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (SHLB x (ANDBconst [31] y))
+ // cond:
+ // result: (SHLB x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDBconst {
+ break
+ }
+ if v_1.AuxInt != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SHLB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (SHLL x (ANDLconst [31] y))
+ // cond:
+ // result: (SHLL x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ if v_1.AuxInt != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SHLL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (SHLQ x (ANDQconst [63] y))
+ // cond:
+ // result: (SHLQ x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ if v_1.AuxInt != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SHLQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SHLW(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (SHLW x (ANDWconst [31] y))
+ // cond:
+ // result: (SHLW x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDWconst {
+ break
+ }
+ if v_1.AuxInt != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SHLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (SHRB x (ANDBconst [31] y))
+ // cond:
+ // result: (SHRB x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDBconst {
+ break
+ }
+ if v_1.AuxInt != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SHRB)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (SHRL x (ANDLconst [31] y))
+ // cond:
+ // result: (SHRL x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ if v_1.AuxInt != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SHRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (SHRQ x (ANDQconst [63] y))
+ // cond:
+ // result: (SHRQ x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ if v_1.AuxInt != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SHRQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (SHRW x (ANDWconst [31] y))
+ // cond:
+ // result: (SHRW x y)
+ for {
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64ANDWconst {
+ break
+ }
+ if v_1.AuxInt != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SHRW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SUBB(v *Value, config *Config) bool {