(LEAQ8 [off1+4*off2] {sym1} x y)
// TODO: more?
+// Lower LEAQ2/4/8 when the index operand is a constant
+(LEAQ2 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(off+scale*2) ->
+ (LEAQ [off+scale*2] {sym} x)
+(LEAQ4 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(off+scale*4) ->
+ (LEAQ [off+scale*4] {sym} x)
+(LEAQ8 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(off+scale*8) ->
+ (LEAQ [off+scale*8] {sym} x)
+
// Absorb InvertFlags into branches.
(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
v.AddArg2(x, y)
return true
}
+ // match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
+ // cond: is32Bit(off+scale*2)
+ // result: (LEAQ [off+scale*2] {sym} x)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ scale := v_1.AuxInt
+ if !(is32Bit(off + scale*2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = off + scale*2
+ v.Aux = sym
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
+ // cond: is32Bit(off+scale*2)
+ // result: (LEAQ [off+scale*2] {sym} x)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ scale := v_1.AuxInt
+ if !(is32Bit(off + scale*2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = off + scale*2
+ v.Aux = sym
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
v.AddArg2(x, y)
return true
}
+ // match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
+ // cond: is32Bit(off+scale*4)
+ // result: (LEAQ [off+scale*4] {sym} x)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ scale := v_1.AuxInt
+ if !(is32Bit(off + scale*4)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = off + scale*4
+ v.Aux = sym
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
+ // cond: is32Bit(off+scale*4)
+ // result: (LEAQ [off+scale*4] {sym} x)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ scale := v_1.AuxInt
+ if !(is32Bit(off + scale*4)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = off + scale*4
+ v.Aux = sym
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
v.AddArg2(x, y)
return true
}
+ // match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
+ // cond: is32Bit(off+scale*8)
+ // result: (LEAQ [off+scale*8] {sym} x)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ scale := v_1.AuxInt
+ if !(is32Bit(off + scale*8)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = off + scale*8
+ v.Aux = sym
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
+ // cond: is32Bit(off+scale*8)
+ // result: (LEAQ [off+scale*8] {sym} x)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ scale := v_1.AuxInt
+ if !(is32Bit(off + scale*8)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = off + scale*8
+ v.Aux = sym
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {