// NOTE(review): this is a diff fragment of a machine-generated SSA rewrite
// file (rewriteAMD64.go style). The enclosing function header is elided by
// the diff; from the rule comments this is rewriteValueAMD64_OpAMD64CMOVLGE.
// Real changes belong in the generator's rules file, not in this output.
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ // b is needed below to allocate the replacement CMP values.
+ b := v.Block
// match: (CMOVLGE x y (InvertFlags cond))
// result: (CMOVLLE x y cond)
for {
// (loop body elided by the diff; y comes from the elided match code)
v.copyOf(y)
return true
}
+ // Rewrite (z >= 128) into (z > 127): for signed compares the two are
+ // equivalent, and 127 fits in a sign-extended 8-bit immediate while 128
+ // does not, so the new CMP has a shorter encoding. c.Uses == 1 ensures
+ // the original compare becomes dead rather than being duplicated.
+ // match: (CMOVLGE x y c:(CMPQconst [128] z))
+ // cond: c.Uses == 1
+ // result: (CMOVLGT x y (CMPQconst [127] z))
+ for {
+ x := v_0
+ y := v_1
+ c := v_2
+ if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ z := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGT)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(z)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // Same rewrite for the 32-bit compare.
+ // match: (CMOVLGE x y c:(CMPLconst [128] z))
+ // cond: c.Uses == 1
+ // result: (CMOVLGT x y (CMPLconst [127] z))
+ for {
+ x := v_0
+ y := v_1
+ c := v_2
+ if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ z := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGT)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(z)
+ v.AddArg3(x, y, v0)
+ return true
+ }
return false
}
// NOTE(review): the CMOVLLT rules below appear under the CMOVLGT header only
// because the diff elides the intervening generated code; they actually live
// in rewriteValueAMD64_OpAMD64CMOVLLT.
func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ // b is needed below to allocate the replacement CMP values.
+ b := v.Block
// match: (CMOVLLT x y (InvertFlags cond))
// result: (CMOVLGT x y cond)
for {
// (loop body elided by the diff; x comes from the elided match code)
v.copyOf(x)
return true
}
+ // Rewrite (z < 128) into (z <= 127): equivalent for signed compares, and
+ // 127 fits in a sign-extended imm8 while 128 does not, shortening the CMP
+ // encoding. c.Uses == 1 ensures the original compare dies.
+ // match: (CMOVLLT x y c:(CMPQconst [128] z))
+ // cond: c.Uses == 1
+ // result: (CMOVLLE x y (CMPQconst [127] z))
+ for {
+ x := v_0
+ y := v_1
+ c := v_2
+ if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ z := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(z)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // Same rewrite for the 32-bit compare.
+ // match: (CMOVLLT x y c:(CMPLconst [128] z))
+ // cond: c.Uses == 1
+ // result: (CMOVLLE x y (CMPLconst [127] z))
+ for {
+ x := v_0
+ y := v_1
+ c := v_2
+ if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ z := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(z)
+ v.AddArg3(x, y, v0)
+ return true
+ }
return false
}
// NOTE(review): the CMOVQGE rules below appear under the CMOVLNE header only
// because the diff elides the intervening generated code; they actually live
// in rewriteValueAMD64_OpAMD64CMOVQGE.
func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ // b is needed below to allocate the replacement CMP values.
+ b := v.Block
// match: (CMOVQGE x y (InvertFlags cond))
// result: (CMOVQLE x y cond)
for {
// (loop body elided by the diff; y comes from the elided match code)
v.copyOf(y)
return true
}
+ // Rewrite (z >= 128) into (z > 127): equivalent for signed compares, and
+ // 127 fits in a sign-extended imm8 while 128 does not, shortening the CMP
+ // encoding. c.Uses == 1 ensures the original compare dies.
+ // match: (CMOVQGE x y c:(CMPQconst [128] z))
+ // cond: c.Uses == 1
+ // result: (CMOVQGT x y (CMPQconst [127] z))
+ for {
+ x := v_0
+ y := v_1
+ c := v_2
+ if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ z := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGT)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(z)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // Same rewrite for the 32-bit compare.
+ // match: (CMOVQGE x y c:(CMPLconst [128] z))
+ // cond: c.Uses == 1
+ // result: (CMOVQGT x y (CMPLconst [127] z))
+ for {
+ x := v_0
+ y := v_1
+ c := v_2
+ if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ z := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGT)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(z)
+ v.AddArg3(x, y, v0)
+ return true
+ }
return false
}
// NOTE(review): the CMOVQLT rules below appear under the CMOVQGT header only
// because the diff elides the intervening generated code; they actually live
// in rewriteValueAMD64_OpAMD64CMOVQLT.
func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ // b is needed below to allocate the replacement CMP values.
+ b := v.Block
// match: (CMOVQLT x y (InvertFlags cond))
// result: (CMOVQGT x y cond)
for {
// (loop body elided by the diff; x comes from the elided match code)
v.copyOf(x)
return true
}
+ // Rewrite (z < 128) into (z <= 127): equivalent for signed compares, and
+ // 127 fits in a sign-extended imm8 while 128 does not, shortening the CMP
+ // encoding. c.Uses == 1 ensures the original compare dies.
+ // match: (CMOVQLT x y c:(CMPQconst [128] z))
+ // cond: c.Uses == 1
+ // result: (CMOVQLE x y (CMPQconst [127] z))
+ for {
+ x := v_0
+ y := v_1
+ c := v_2
+ if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ z := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(z)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // Same rewrite for the 32-bit compare.
+ // match: (CMOVQLT x y c:(CMPLconst [128] z))
+ // cond: c.Uses == 1
+ // result: (CMOVQLE x y (CMPLconst [127] z))
+ for {
+ x := v_0
+ y := v_1
+ c := v_2
+ if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ z := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(z)
+ v.AddArg3(x, y, v0)
+ return true
+ }
return false
}
// NOTE(review): the SETAE rules below appear under the CMOVQNE header only
// because the diff elides the intervening generated code; they actually live
// in rewriteValueAMD64_OpAMD64SETAE, whose declarations of v_0 and b are in
// the elided part of that function.
func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
v.AddArg(v0)
return true
}
+ // Rewrite unsigned (x >= 128) into (x > 127): equivalent for unsigned
+ // compares, and 127 fits in a sign-extended imm8 while 128 does not,
+ // shortening the CMP encoding. c.Uses == 1 ensures the original compare
+ // dies rather than being duplicated.
+ // match: (SETAE c:(CMPQconst [128] x))
+ // cond: c.Uses == 1
+ // result: (SETA (CMPQconst [127] x))
+ for {
+ c := v_0
+ if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ x := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // Same rewrite for the 32-bit compare.
+ // match: (SETAE c:(CMPLconst [128] x))
+ // cond: c.Uses == 1
+ // result: (SETA (CMPLconst [127] x))
+ for {
+ c := v_0
+ if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ x := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETA)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
// match: (SETAE (InvertFlags x))
// result: (SETBE x)
for {
}
func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
v_0 := v.Args[0]
+ // b is needed below to allocate the replacement CMP values.
+ b := v.Block
// match: (SETB (TESTQ x x))
// result: (ConstBool [false])
for {
// (loop body elided by the diff; x comes from the elided match code)
v.AddArg(x)
return true
}
+ // Rewrite unsigned (x < 128) into (x <= 127): equivalent for unsigned
+ // compares, and 127 fits in a sign-extended imm8 while 128 does not,
+ // shortening the CMP encoding. c.Uses == 1 ensures the original compare
+ // dies.
+ // match: (SETB c:(CMPQconst [128] x))
+ // cond: c.Uses == 1
+ // result: (SETBE (CMPQconst [127] x))
+ for {
+ c := v_0
+ if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ x := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // Same rewrite for the 32-bit compare.
+ // match: (SETB c:(CMPLconst [128] x))
+ // cond: c.Uses == 1
+ // result: (SETBE (CMPLconst [127] x))
+ for {
+ c := v_0
+ if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ x := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
// match: (SETB (InvertFlags x))
// result: (SETA x)
for {
}
func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
v_0 := v.Args[0]
+ // b is needed below to allocate the replacement CMP values.
+ b := v.Block
+ // Rewrite signed (x >= 128) into (x > 127): equivalent for signed
+ // compares, and 127 fits in a sign-extended imm8 while 128 does not,
+ // shortening the CMP encoding. c.Uses == 1 ensures the original compare
+ // dies.
+ // match: (SETGE c:(CMPQconst [128] x))
+ // cond: c.Uses == 1
+ // result: (SETG (CMPQconst [127] x))
+ for {
+ c := v_0
+ if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ x := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // Same rewrite for the 32-bit compare.
+ // match: (SETGE c:(CMPLconst [128] x))
+ // cond: c.Uses == 1
+ // result: (SETG (CMPLconst [127] x))
+ for {
+ c := v_0
+ if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ x := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETG)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
// match: (SETGE (InvertFlags x))
// result: (SETLE x)
for {
}
func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
v_0 := v.Args[0]
+ // b is needed below to allocate the replacement CMP values.
+ b := v.Block
+ // Rewrite signed (x < 128) into (x <= 127): equivalent for signed
+ // compares, and 127 fits in a sign-extended imm8 while 128 does not,
+ // shortening the CMP encoding. c.Uses == 1 ensures the original compare
+ // dies.
+ // match: (SETL c:(CMPQconst [128] x))
+ // cond: c.Uses == 1
+ // result: (SETLE (CMPQconst [127] x))
+ for {
+ c := v_0
+ if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ x := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // Same rewrite for the 32-bit compare.
+ // match: (SETL c:(CMPLconst [128] x))
+ // cond: c.Uses == 1
+ // result: (SETLE (CMPLconst [127] x))
+ for {
+ c := v_0
+ if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ x := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
// match: (SETL (InvertFlags x))
// result: (SETG x)
for {
break
}
// NOTE(review): this case belongs to the block-rewrite switch in
// rewriteBlockAMD64 (elided by the diff); b is that function's *Block.
case BlockAMD64GE:
+ // Rewrite a (z >= 128) branch into (z > 127): equivalent for signed
+ // compares, and 127 fits in a sign-extended imm8 while 128 does not,
+ // shortening the CMP encoding. c.Uses == 1 ensures the original compare
+ // dies.
+ // match: (GE c:(CMPQconst [128] z) yes no)
+ // cond: c.Uses == 1
+ // result: (GT (CMPQconst [127] z) yes no)
+ for b.Controls[0].Op == OpAMD64CMPQconst {
+ c := b.Controls[0]
+ if auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ z := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(c.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(z)
+ b.resetWithControl(BlockAMD64GT, v0)
+ return true
+ }
+ // Same rewrite for the 32-bit compare.
+ // match: (GE c:(CMPLconst [128] z) yes no)
+ // cond: c.Uses == 1
+ // result: (GT (CMPLconst [127] z) yes no)
+ for b.Controls[0].Op == OpAMD64CMPLconst {
+ c := b.Controls[0]
+ if auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ z := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(c.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(z)
+ b.resetWithControl(BlockAMD64GT, v0)
+ return true
+ }
// match: (GE (InvertFlags cmp) yes no)
// result: (LE cmp yes no)
for b.Controls[0].Op == OpAMD64InvertFlags {
return true
}
// NOTE(review): this case belongs to the block-rewrite switch in
// rewriteBlockAMD64 (elided by the diff); b is that function's *Block.
case BlockAMD64LT:
+ // Rewrite a (z < 128) branch into (z <= 127): equivalent for signed
+ // compares, and 127 fits in a sign-extended imm8 while 128 does not,
+ // shortening the CMP encoding. c.Uses == 1 ensures the original compare
+ // dies.
+ // match: (LT c:(CMPQconst [128] z) yes no)
+ // cond: c.Uses == 1
+ // result: (LE (CMPQconst [127] z) yes no)
+ for b.Controls[0].Op == OpAMD64CMPQconst {
+ c := b.Controls[0]
+ if auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ z := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(c.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(z)
+ b.resetWithControl(BlockAMD64LE, v0)
+ return true
+ }
+ // Same rewrite for the 32-bit compare.
+ // match: (LT c:(CMPLconst [128] z) yes no)
+ // cond: c.Uses == 1
+ // result: (LE (CMPLconst [127] z) yes no)
+ for b.Controls[0].Op == OpAMD64CMPLconst {
+ c := b.Controls[0]
+ if auxIntToInt32(c.AuxInt) != 128 {
+ break
+ }
+ z := c.Args[0]
+ if !(c.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(c.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(127)
+ v0.AddArg(z)
+ b.resetWithControl(BlockAMD64LE, v0)
+ return true
+ }
// match: (LT (InvertFlags cmp) yes no)
// result: (GT cmp yes no)
for b.Controls[0].Op == OpAMD64InvertFlags {