// Lowering comparisons
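+// EqB: booleans are 0 or 1, so EQV (bitwise equivalence, i.e. XNOR) sets each
+// bit where x and y agree; ANDconst [1] extracts the low bit, the equality result.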
(EqB x y) -> (ANDconst [1] (EQV x y))
+// If both operands are signed, sign-extend them; this sets up for sign/zero-extension elision later (illustrated below).
+(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Eq8 x y) -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (Equal (CMPW x y))
(EqPtr x y) -> (Equal (CMP x y))
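+// NeqB: booleans are 0 or 1, so XOR is 1 exactly when they differ.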
(NeqB x y) -> (XOR x y)
+// As with Eq8 and Eq16, prefer sign extension where it is likely to enable later elision.
+(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Neq8 x y) -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) -> (NotEqual (CMPW x y))
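+// Illustrative example (not a rule): with two int16 operands the signed rule
+// above produces
+//   (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+// while for uint16 (or mixed-signedness) operands its condition fails and the
+// zero-extension fallback fires; match blocks are tried in source order.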
(Trunc64to16 x) -> (MOVHreg x)
(Trunc64to32 x) -> (MOVWreg x)
+// Note that MOV??reg returns a 64-bit int; x is not necessarily that wide.
+// This may interact with other patterns in the future. (Compare with arm64.)
+(MOVBZreg x:(MOVBZload _ _)) -> x
+(MOVBreg x:(MOVBload _ _)) -> x
+(MOVHZreg x:(MOVHZload _ _)) -> x
+(MOVHreg x:(MOVHload _ _)) -> x
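+// Illustrative composition (assuming (SignExt8to32 x) lowers to (MOVBreg x)
+// on this target): a signed byte comparison becomes
+//   (Equal (CMPW (MOVBreg (MOVBload p1 mem)) (MOVBreg (MOVBload p2 mem))))
+// and the MOVBreg rule above elides each extension, since MOVBload is defined
+// to yield a sign-extended 64-bit result.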
return rewriteValuePPC64_OpPPC64LessThan(v, config)
case OpPPC64MOVBZload:
return rewriteValuePPC64_OpPPC64MOVBZload(v, config)
+ case OpPPC64MOVBZreg:
+ return rewriteValuePPC64_OpPPC64MOVBZreg(v, config)
case OpPPC64MOVBload:
return rewriteValuePPC64_OpPPC64MOVBload(v, config)
+ case OpPPC64MOVBreg:
+ return rewriteValuePPC64_OpPPC64MOVBreg(v, config)
case OpPPC64MOVBstore:
return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
case OpPPC64MOVBstorezero:
		return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
case OpPPC64MOVHZload:
return rewriteValuePPC64_OpPPC64MOVHZload(v, config)
+ case OpPPC64MOVHZreg:
+ return rewriteValuePPC64_OpPPC64MOVHZreg(v, config)
case OpPPC64MOVHload:
return rewriteValuePPC64_OpPPC64MOVHload(v, config)
+ case OpPPC64MOVHreg:
+ return rewriteValuePPC64_OpPPC64MOVHreg(v, config)
case OpPPC64MOVHstore:
return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
case OpPPC64MOVHstorezero:
b := v.Block
_ = b
// match: (Eq16 x y)
+ // cond: isSigned(x.Type) && isSigned(y.Type)
+ // result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(isSigned(x.Type) && isSigned(y.Type)) {
+ break
+ }
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Eq16 x y)
// cond:
// result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
for {
b := v.Block
_ = b
// match: (Eq8 x y)
+ // cond: isSigned(x.Type) && isSigned(y.Type)
+ // result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(isSigned(x.Type) && isSigned(y.Type)) {
+ break
+ }
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Eq8 x y)
// cond:
// result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
for {
b := v.Block
_ = b
// match: (Neq16 x y)
+ // cond: isSigned(x.Type) && isSigned(y.Type)
+ // result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(isSigned(x.Type) && isSigned(y.Type)) {
+ break
+ }
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Neq16 x y)
// cond:
// result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
for {
b := v.Block
_ = b
// match: (Neq8 x y)
+ // cond: isSigned(x.Type) && isSigned(y.Type)
+ // result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(isSigned(x.Type) && isSigned(y.Type)) {
+ break
+ }
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+ v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+ v2.AddArg(y)
+ v0.AddArg(v2)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Neq8 x y)
// cond:
// result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
for {
}
return false
}
+func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBZreg x:(MOVBZload _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64MOVBload(v *Value, config *Config) bool {
b := v.Block
_ = b
}
return false
}
+func rewriteValuePPC64_OpPPC64MOVBreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVBreg x:(MOVBload _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVBload {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
b := v.Block
_ = b
}
return false
}
+func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHZreg x:(MOVHZload _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64MOVHload(v *Value, config *Config) bool {
b := v.Block
_ = b
}
return false
}
+func rewriteValuePPC64_OpPPC64MOVHreg(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (MOVHreg x:(MOVHload _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVHload {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
b := v.Block
_ = b