(ADDQ x (LEAQ [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
(ADDQ (LEAQ [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y)
-// fold ADDQconst into leaqX
+// fold ADDQconst into LEAQx
(ADDQconst [c] (LEAQ1 [d] {s} x y)) -> (LEAQ1 [c+d] {s} x y)
(ADDQconst [c] (LEAQ2 [d] {s} x y)) -> (LEAQ2 [c+d] {s} x y)
(ADDQconst [c] (LEAQ4 [d] {s} x y)) -> (LEAQ4 [c+d] {s} x y)
(LEAQ8 [c] {s} (ADDQconst [d] x) y) && x.Op != OpSB -> (LEAQ8 [c+d] {s} x y)
(LEAQ8 [c] {s} x (ADDQconst [d] y)) && y.Op != OpSB -> (LEAQ8 [c+8*d] {s} x y)
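+// (A sketch of the arithmetic behind the two LEAQ8 rules above: LEAQ8 [c] {s} x y
+// computes x + 8*y + c, so a constant pulled out of the index operand is scaled,
+// x + 8*(y+d) + c = x + 8*y + (c+8*d), while one pulled out of the base is not.)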
+// fold shifts into LEAQx
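+// (For illustration: LEAQ1 [c] {s} x (SHLQconst [1] y) computes x + (y<<1) + c,
+// i.e. x + 2*y + c, which is exactly what LEAQ2 [c] {s} x y encodes, so the
+// shift can be absorbed into the scale factor of the addressing mode.)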
+(LEAQ1 [c] {s} x (SHLQconst [1] y)) -> (LEAQ2 [c] {s} x y)
+(LEAQ1 [c] {s} (SHLQconst [1] x) y) -> (LEAQ2 [c] {s} y x)
+(LEAQ1 [c] {s} x (SHLQconst [2] y)) -> (LEAQ4 [c] {s} x y)
+(LEAQ1 [c] {s} (SHLQconst [2] x) y) -> (LEAQ4 [c] {s} y x)
+(LEAQ1 [c] {s} x (SHLQconst [3] y)) -> (LEAQ8 [c] {s} x y)
+(LEAQ1 [c] {s} (SHLQconst [3] x) y) -> (LEAQ8 [c] {s} y x)
+
+(LEAQ2 [c] {s} x (SHLQconst [1] y)) -> (LEAQ4 [c] {s} x y)
+(LEAQ2 [c] {s} x (SHLQconst [2] y)) -> (LEAQ8 [c] {s} x y)
+(LEAQ4 [c] {s} x (SHLQconst [1] y)) -> (LEAQ8 [c] {s} x y)
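+// (A sketch of the reasoning for the asymmetry: LEAQ1 adds both of its arguments
+// unscaled, so the shift may sit on either side and the operands are swapped as
+// needed, but for LEAQ2 and LEAQ4 only the second, scaled argument can absorb a
+// further shift; a shifted first argument would require a scaled base, which no
+// x86 addressing mode provides.)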
+
// reverse ordering of compare instruction
(SETL (InvertFlags x)) -> (SETG x)
(SETG (InvertFlags x)) -> (SETL x)
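+// (A sketch of the reasoning: InvertFlags marks a flags value whose comparison
+// was emitted with its operands in the opposite order from what the consumer
+// expects. Wanting "a < b" while holding the flags of (CMPQ b a) is the same as
+// asking whether b > a, so SETL of the inverted flags becomes SETG, and vice versa.)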
v.AddArg(y)
return true
}
+ // match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
+ // cond:
+ // result: (LEAQ2 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[1].AuxInt != 1 {
+ break
+ }
+ y := v.Args[1].Args[0]
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAQ1 [c] {s} (SHLQconst [1] x) y)
+ // cond:
+ // result: (LEAQ2 [c] {s} y x)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ if v.Args[0].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[0].AuxInt != 1 {
+ break
+ }
+ x := v.Args[0].Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
+ // cond:
+ // result: (LEAQ4 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[1].AuxInt != 2 {
+ break
+ }
+ y := v.Args[1].Args[0]
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAQ1 [c] {s} (SHLQconst [2] x) y)
+ // cond:
+ // result: (LEAQ4 [c] {s} y x)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ if v.Args[0].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[0].AuxInt != 2 {
+ break
+ }
+ x := v.Args[0].Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
+ // cond:
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[1].AuxInt != 3 {
+ break
+ }
+ y := v.Args[1].Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAQ1 [c] {s} (SHLQconst [3] x) y)
+ // cond:
+ // result: (LEAQ8 [c] {s} y x)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ if v.Args[0].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[0].AuxInt != 3 {
+ break
+ }
+ x := v.Args[0].Args[0]
+ y := v.Args[1]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
v.AddArg(y)
return true
}
+ // match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
+ // cond:
+ // result: (LEAQ4 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[1].AuxInt != 1 {
+ break
+ }
+ y := v.Args[1].Args[0]
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
+ // cond:
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[1].AuxInt != 2 {
+ break
+ }
+ y := v.Args[1].Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
v.AddArg(y)
return true
}
+ // match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
+ // cond:
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := v.AuxInt
+ s := v.Aux
+ x := v.Args[0]
+ if v.Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[1].AuxInt != 1 {
+ break
+ }
+ y := v.Args[1].Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = c
+ v.Aux = s
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
// result: (LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)