(MOVWZreg x:(MOVHZreg _)) -> x
(MOVWZreg x:(MOVWZreg _)) -> x
+// fold extensions into constants
+(MOVBreg (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
+(MOVBZreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))])
+(MOVHreg (MOVDconst [c])) -> (MOVDconst [int64(int16(c))])
+(MOVHZreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))])
+(MOVWreg (MOVDconst [c])) -> (MOVDconst [int64(int32(c))])
+(MOVWZreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])
+
// sign extended loads
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
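
These rules evaluate the extension at rewrite time: the conversion in the result is applied to the constant, and the extended value becomes the new MOVDconst's AuxInt, so no extension instruction survives to run time. A minimal runnable sketch of the semantics in ordinary Go, outside the compiler (the constant is arbitrary):

	package main

	import "fmt"

	func main() {
		c := int64(0x1234567890ABCDEF)
		// Each line mirrors one rule: the conversion is exactly the
		// expression the rule stores into the new MOVDconst's AuxInt.
		fmt.Println("MOVBreg: ", int64(int8(c)))   // sign-extend low 8 bits
		fmt.Println("MOVBZreg:", int64(uint8(c)))  // zero-extend low 8 bits
		fmt.Println("MOVHreg: ", int64(int16(c)))  // sign-extend low 16 bits
		fmt.Println("MOVHZreg:", int64(uint16(c))) // zero-extend low 16 bits
		fmt.Println("MOVWreg: ", int64(int32(c)))  // sign-extend low 32 bits
		fmt.Println("MOVWZreg:", int64(uint32(c))) // zero-extend low 32 bits
	}
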
v.AddArg(x)
return true
}
+ // match: (MOVBZreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(uint8(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64(uint8(c))
+ return true
+ }
// match: (MOVBZreg x:(MOVBZload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBZload <v.Type> [off] {sym} ptr mem)
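
The generated case above shows the pattern all six constant folds share: check that the argument is a MOVDconst, read the constant out of AuxInt, then reset the value in place so every existing use sees the folded constant. A minimal sketch of that shape with simplified stand-in types (Value, Op, and rewriteMOVBZreg below are illustrative, not the real cmd/compile definitions):

	package main

	import "fmt"

	type Op int

	const (
		OpMOVDconst Op = iota
		OpMOVBZreg
	)

	type Value struct {
		Op     Op
		AuxInt int64
		Args   []*Value
	}

	// reset mimics v.reset(op): the value is rewritten in place, so all
	// existing uses of v see the new operation.
	func (v *Value) reset(op Op) {
		v.Op = op
		v.AuxInt = 0
		v.Args = nil
	}

	func rewriteMOVBZreg(v *Value) bool {
		v_0 := v.Args[0]
		if v_0.Op != OpMOVDconst {
			return false // no match; fall through to the next case
		}
		c := v_0.AuxInt
		v.reset(OpMOVDconst)
		v.AuxInt = int64(uint8(c)) // the fold itself
		return true
	}

	func main() {
		konst := &Value{Op: OpMOVDconst, AuxInt: -1}
		v := &Value{Op: OpMOVBZreg, Args: []*Value{konst}}
		if rewriteMOVBZreg(v) {
			fmt.Println(v.AuxInt) // 255: uint8(-1) zero-extended back to int64
		}
	}
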
v.AddArg(x)
return true
}
+ // match: (MOVBreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(int8(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64(int8(c))
+ return true
+ }
// match: (MOVBreg x:(MOVBZload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
v.AddArg(x)
return true
}
+ // match: (MOVHZreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64(uint16(c))
+ return true
+ }
// match: (MOVHZreg x:(MOVHZload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVHZload <v.Type> [off] {sym} ptr mem)
v.AddArg(x)
return true
}
+ // match: (MOVHreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64(int16(c))
+ return true
+ }
// match: (MOVHreg x:(MOVHZload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVHload <v.Type> [off] {sym} ptr mem)
v.AddArg(x)
return true
}
+ // match: (MOVWZreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64(uint32(c))
+ return true
+ }
// match: (MOVWZreg x:(MOVWZload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVWZload <v.Type> [off] {sym} ptr mem)
v.AddArg(x)
return true
}
+ // match: (MOVWreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64(int32(c))
+ return true
+ }
// match: (MOVWreg x:(MOVWZload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)