(ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x)
(NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x)
(NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x)
+(NEG (SUB x y)) => (SUB y x)
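The rule added here relies on the two's-complement identity -(x - y) == y - x, which holds for all inputs (including when x - y wraps), so the NEG disappears entirely by swapping the SUB operands. A minimal function of the targeted shape (illustrative only; a generic-pass rewrite may already catch simple source-level occurrences before lowering):

// diffNeg compiles to a single SUB with swapped operands
// rather than a SUB followed by a NEG.
func diffNeg(x, y int64) int64 {
	return -(x - y)
}

The canonicalization rule below is related context: it is what introduces the InvertFlags that the ISEL rules further down have to absorb.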
((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
// ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
-// ISEL auxInt values 4=GE 5=LE 6=NE arg2 ? arg1 : arg0
+// ISEL auxInt values 4=GE 5=LE 6=NE !arg2 ? arg1 : arg0
// ISELB special case where arg0, arg1 values are 0, 1
(Equal cmp) => (ISELB [2] (MOVDconst [1]) cmp)
(ISEL [n] x y (InvertFlags bool)) && n%4 == 0 => (ISEL [n+1] x y bool)
(ISEL [n] x y (InvertFlags bool)) && n%4 == 1 => (ISEL [n-1] x y bool)
(ISEL [n] x y (InvertFlags bool)) && n%4 == 2 => (ISEL [n] x y bool)
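The n%4 arithmetic works because InvertFlags swaps the operands of the comparison feeding the flags, which exchanges the LT and GT senses and leaves EQ alone: auxInt 0 and 1 trade places (as do 4 and 5) while 2 and 6 map to themselves. The same mapping as a small helper (written for this note, not compiler code):

// invertedSense returns the ISEL auxInt equivalent to n once the
// underlying comparison has had its operands swapped.
func invertedSense(n int32) int32 {
	switch n % 4 {
	case 0: // LT becomes GT
		return n + 1
	case 1: // GT becomes LT
		return n - 1
	}
	return n // EQ and NE are symmetric in their operands
}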
+(XORconst [1] (ISELB [6] (MOVDconst [1]) cmp)) => (ISELB [2] (MOVDconst [1]) cmp)
+(XORconst [1] (ISELB [5] (MOVDconst [1]) cmp)) => (ISELB [1] (MOVDconst [1]) cmp)
+(XORconst [1] (ISELB [4] (MOVDconst [1]) cmp)) => (ISELB [0] (MOVDconst [1]) cmp)
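These three additions cancel a boolean negation against an inverted-sense select: XORconst [1] flips a 0/1 value, and flipping the NE result (auxInt 6) is the same as computing EQ (2) directly, with LE (5) likewise pairing with GT (1) and GE (4) with LT (0). A standalone model that checks the identity (plain Go written for this note; sel only mimics ISELB's 0/1 result):

package main

import "fmt"

// sel models ISELB [n] (MOVDconst [1]) cmp: for n < 4 it yields 1 when
// the tested CR bit is set; for n >= 4 (GE/LE/NE) the sense is inverted.
func sel(n int, bit bool) int64 {
	if n >= 4 {
		bit = !bit
	}
	if bit {
		return 1
	}
	return 0
}

func main() {
	for _, n := range []int{4, 5, 6} {
		for _, bit := range []bool{false, true} {
			// XORing the inverted form with 1 equals the direct form
			// with auxInt n-4, which is exactly what the rules encode.
			fmt.Println(1^sel(n, bit) == sel(n-4, bit)) // always true
		}
	}
}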
// A particular pattern seen in cgo code:
(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x)
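Since MOVBZload zero-extends a single byte, only the low 8 bits of the mask can ever match, so the constant is narrowed to c&0xFF and folded into ANDconst, freeing the register that held the MOVDconst. A sketch of the shape involved (hypothetical accessor; assumes earlier rewrites have already merged the zero extension into the load):

// hasFlag masks a loaded byte with a constant wider than 8 bits; the
// rule turns AND (MOVDconst [0x101]) (MOVBZload ...) into ANDconst [1].
func hasFlag(p *byte) bool {
	return uint64(*p)&0x101 != 0
}

The next hunk is in the operation table, PPC64Ops.go, where the same ISEL comment fix is applied.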
{name: "CMPWUconst", argLength: 1, reg: gp1cr, asm: "CMPWU", aux: "Int32", typ: "Flags"},
// ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
- // ISEL auxInt values 4=GE 5=LE 6=NE arg2 ? arg1 : arg0
+ // ISEL auxInt values 4=GE 5=LE 6=NE !arg2 ? arg1 : arg0
// ISELB special case where arg0, arg1 values are 0, 1 for boolean result
{name: "ISEL", argLength: 3, reg: crgp21, asm: "ISEL", aux: "Int32", typ: "Int32"}, // see above
{name: "ISELB", argLength: 2, reg: crgp11, asm: "ISEL", aux: "Int32", typ: "Int32"}, // see above
v.AddArg(x)
return true
}
+ // match: (NEG (SUB x y))
+ // result: (SUB y x)
+ for {
+ if v_0.Op != OpPPC64SUB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpPPC64SUB)
+ v.AddArg2(y, x)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64XORconst(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
	// match: (XORconst [c] (XORconst [d] x))
	// result: (XORconst [c^d] x)
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpPPC64XORconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpPPC64XORconst)
		v.AuxInt = int64ToAuxInt(c ^ d)
		v.AddArg(x)
		return true
	}
+ // match: (XORconst [1] (ISELB [6] (MOVDconst [1]) cmp))
+ // result: (ISELB [2] (MOVDconst [1]) cmp)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64ISELB || auxIntToInt32(v_0.AuxInt) != 6 {
+ break
+ }
+ cmp := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+ // match: (XORconst [1] (ISELB [5] (MOVDconst [1]) cmp))
+ // result: (ISELB [1] (MOVDconst [1]) cmp)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64ISELB || auxIntToInt32(v_0.AuxInt) != 5 {
+ break
+ }
+ cmp := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+ // match: (XORconst [1] (ISELB [4] (MOVDconst [1]) cmp))
+ // result: (ISELB [0] (MOVDconst [1]) cmp)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64ISELB || auxIntToInt32(v_0.AuxInt) != 4 {
+ break
+ }
+ cmp := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPanicBounds(v *Value) bool {