(Sub(64|32|16|8) (Com(64|32|16|8) x) (Neg(64|32|16|8) x)) => (Const(64|32|16|8) [-1])
(Add(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])
+// Simplifications when the operands share a common term:
+// (t + x) - (t + y) == x - y
+// (t + x) - (y + t) == x - y
+// (x + t) - (y + t) == x - y
+// (x + t) - (t + y) == x - y
+// (x - t) + (t + y) == x + y
+// (x - t) + (y + t) == x + y
+(Sub(64|32|16|8) (Add(64|32|16|8) t x) (Add(64|32|16|8) t y)) => (Sub(64|32|16|8) x y)
+(Add(64|32|16|8) (Sub(64|32|16|8) x t) (Add(64|32|16|8) t y)) => (Add(64|32|16|8) x y)
+
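For reference (not part of the CL): the rewrite is only sound because fixed-width integer `+` and `-` wrap modulo 2^n, so the common term cancels even when an intermediate sum overflows. A quick standalone check, exhaustive over uint8:

```go
package main

import "fmt"

func main() {
	// Verify (t+x)-(t+y) == x-y and (x-t)+(t+y) == x+y for every
	// uint8 triple; the other operand orders listed in the comment
	// follow by commutativity of the inner additions.
	for t := 0; t < 256; t++ {
		for x := 0; x < 256; x++ {
			for y := 0; y < 256; y++ {
				T, X, Y := uint8(t), uint8(x), uint8(y)
				if (T+X)-(T+Y) != X-Y {
					fmt.Println("sub identity fails:", T, X, Y)
					return
				}
				if (X-T)+(T+Y) != X+Y {
					fmt.Println("add identity fails:", T, X, Y)
					return
				}
			}
		}
	}
	fmt.Println("both identities hold for all uint8 triples")
}
```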
// ^(x-1) == ^x+1 == -x
(Add(64|32|16|8) (Const(64|32|16|8) [1]) (Com(64|32|16|8) x)) => (Neg(64|32|16|8) x)
(Com(64|32|16|8) (Add(64|32|16|8) (Const(64|32|16|8) [-1]) x)) => (Neg(64|32|16|8) x)
}
break
}
+ // match: (Add16 (Sub16 x t) (Add16 t y))
+ // result: (Add16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub16 {
+ continue
+ }
+ t := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAdd16)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
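The block above is machine-generated by the SSA rule generator (rulegen): each `_i0`/`_i1` loop enumerates both operand orders of a commutative op by swapping the pair in the loop's post statement, so a single rule matches every argument arrangement. A standalone toy sketch of the same idiom (the `node` type and function names here are hypothetical, for illustration only):

```go
package main

import "fmt"

// node is a toy stand-in for the compiler's *Value.
type node struct {
	op   string
	args []*node
}

// matchAddOfSubAdd recognizes (Add (Sub x t) (Add t y)) and returns
// x and y. The outer Add is commutative, so both operand orders are
// tried (mirroring _i0); the inner Add's operands are likewise tried
// both ways (mirroring _i1).
func matchAddOfSubAdd(n *node) (rx, ry *node, ok bool) {
	if n.op != "Add" {
		return nil, nil, false
	}
	a, b := n.args[0], n.args[1]
	for i := 0; i <= 1; i, a, b = i+1, b, a {
		if a.op != "Sub" || b.op != "Add" {
			continue
		}
		x, t := a.args[0], a.args[1]
		b0, b1 := b.args[0], b.args[1]
		for j := 0; j <= 1; j, b0, b1 = j+1, b1, b0 {
			if t == b0 { // shared term found
				return x, b1, true // rewrite to (Add x y)
			}
		}
	}
	return nil, nil, false
}

func main() {
	t := &node{op: "t"}
	x := &node{op: "x"}
	y := &node{op: "y"}
	// (Add (Add y t) (Sub x t)): top-level operands reversed and the
	// inner Add reversed; the swap loops still find the match.
	e := &node{op: "Add", args: []*node{
		{op: "Add", args: []*node{y, t}},
		{op: "Sub", args: []*node{x, t}},
	}}
	if rx, ry, ok := matchAddOfSubAdd(e); ok {
		fmt.Printf("rewrite to (Add %s %s)\n", rx.op, ry.op)
	}
}
```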
// match: (Add16 (Const16 [1]) (Com16 x))
// result: (Neg16 x)
for {
}
break
}
+ // match: (Add32 (Sub32 x t) (Add32 t y))
+ // result: (Add32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub32 {
+ continue
+ }
+ t := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAdd32)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
// match: (Add32 (Const32 [1]) (Com32 x))
// result: (Neg32 x)
for {
}
break
}
+ // match: (Add64 (Sub64 x t) (Add64 t y))
+ // result: (Add64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub64 {
+ continue
+ }
+ t := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAdd64)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
// match: (Add64 (Const64 [1]) (Com64 x))
// result: (Neg64 x)
for {
}
break
}
+ // match: (Add8 (Sub8 x t) (Add8 t y))
+ // result: (Add8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub8 {
+ continue
+ }
+ t := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAdd8)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
// match: (Add8 (Const8 [1]) (Com8 x))
// result: (Neg8 x)
for {
v.AuxInt = int16ToAuxInt(-1)
return true
}
+ // match: (Sub16 (Add16 t x) (Add16 t y))
+ // result: (Sub16 x y)
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ t := v_0_0
+ x := v_0_1
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpSub16)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
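Note the structural difference from the Add matcher above: Sub is not commutative, so the generated code never swaps `v_0` and `v_1`; only the two inner Adds get swap loops, giving four operand arrangements instead of eight. Continuing the toy sketch (same hypothetical `node` type as before):

```go
// matchSubOfAdds recognizes (Sub (Add t x) (Add t y)) and returns
// x and y. The outer Sub's operands stay fixed; only the inner
// commutative Adds are enumerated (mirroring _i0 and _i1).
func matchSubOfAdds(n *node) (rx, ry *node, ok bool) {
	if n.op != "Sub" {
		return nil, nil, false
	}
	l, r := n.args[0], n.args[1] // never swapped: Sub is ordered
	if l.op != "Add" || r.op != "Add" {
		return nil, nil, false
	}
	l0, l1 := l.args[0], l.args[1]
	for i := 0; i <= 1; i, l0, l1 = i+1, l1, l0 {
		r0, r1 := r.args[0], r.args[1]
		for j := 0; j <= 1; j, r0, r1 = j+1, r1, r0 {
			if l0 == r0 { // shared term t
				return l1, r1, true // rewrite to (Sub x y)
			}
		}
	}
	return nil, nil, false
}
```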
// match: (Sub16 (Add16 x y) x)
// result: y
for {
v.AuxInt = int32ToAuxInt(-1)
return true
}
+ // match: (Sub32 (Add32 t x) (Add32 t y))
+ // result: (Sub32 x y)
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ t := v_0_0
+ x := v_0_1
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpSub32)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
// match: (Sub32 (Add32 x y) x)
// result: y
for {
v.AuxInt = int64ToAuxInt(-1)
return true
}
+ // match: (Sub64 (Add64 t x) (Add64 t y))
+ // result: (Sub64 x y)
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ t := v_0_0
+ x := v_0_1
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpSub64)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
// match: (Sub64 (Add64 x y) x)
// result: y
for {
v.AuxInt = int8ToAuxInt(-1)
return true
}
+ // match: (Sub8 (Add8 t x) (Add8 t y))
+ // result: (Sub8 x y)
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ t := v_0_0
+ x := v_0_1
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpSub8)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
// match: (Sub8 (Add8 x y) x)
// result: y
for {
return r
}
+func SubAddSimplify2(a, b, c int) (int, int, int, int, int, int) {
+ // amd64:-"ADDQ"
+ r := (a + b) - (a + c)
+ // amd64:-"ADDQ"
+ r1 := (a + b) - (c + a)
+ // amd64:-"ADDQ"
+ r2 := (b + a) - (a + c)
+ // amd64:-"ADDQ"
+ r3 := (b + a) - (c + a)
+ // amd64:-"SUBQ"
+ r4 := (a - c) + (c + b)
+ // amd64:-"SUBQ"
+ r5 := (a - c) + (b + c)
+ return r, r1, r2, r3, r4, r5
+}
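For context (not part of the diff): in the test/codegen harness, an annotation such as `// amd64:-"ADDQ"` is a negative check, asserting that the compiled function's amd64 assembly contains nothing matching the quoted regexp, while a pattern without the leading minus must appear. A small hypothetical test in the same style, combining a positive and a negative check:

```go
// Multiplication by a power of two should compile to a shift,
// not a multiply, on amd64. (Hypothetical example function.)
func MulBy8(x int) int {
	// amd64:"SHLQ",-"IMULQ"
	return x * 8
}
```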
+
func SubAddNegSimplify(a, b int) int {
// amd64:"NEGQ",-"ADDQ",-"SUBQ"
// ppc64x:"NEG",-"ADD",-"SUB"