}
}
+// isGPReg reports whether r is a general-purpose integer register.
+func isGPReg(r int16) bool {
+	return x86.REG_AL <= r && r <= x86.REG_R15
+}
+
func isFPReg(r int16) bool {
return x86.REG_X0 <= r && r <= x86.REG_Z31
}
if v.Type.IsMemory() {
return
}
- x := v.Args[0].Reg()
+ arg := v.Args[0]
+ x := arg.Reg()
y := v.Reg()
if v.Type.IsSIMD() {
- x = simdOrMaskReg(v.Args[0])
+ x = simdOrMaskReg(arg)
y = simdOrMaskReg(v)
}
if x != y {
- opregreg(s, moveByRegsWidth(y, x, v.Type.Size()), y, x)
+ width := v.Type.Size()
+ if width == 8 && isGPReg(y) && ssa.ZeroUpper32Bits(arg, 3) {
+		// The source is already zero-extended from 32 to 64 bits,
+		// but we are asked to do a full 64-bit copy.
+		// A 32-bit move zeroes the upper 32 bits of the destination
+		// anyway, and lets the assembler omit the REX.W prefix,
+		// saving a byte of instruction-cache footprint.
+ width = 4
+ }
+ opregreg(s, moveByRegsWidth(y, x, width), y, x)
}
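
The narrowing above leans on an amd64 architectural guarantee: any write to a 32-bit register clears bits 63:32 of the containing 64-bit register, so a 32-bit register-to-register move of a value whose upper half is already zero is observably identical to the 64-bit move. A minimal standalone sketch of the equivalence (ordinary Go, not part of this change):

	package main

	import "fmt"

	func main() {
		x := uint64(0x00000000_CAFEBABE) // upper 32 bits already zero
		y := uint64(uint32(x))           // models MOVL: writes the low 32 bits, zeroes the rest
		fmt.Println(x == y)              // true: the narrower copy loses nothing
	}
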
case ssa.OpLoadReg:
if v.Type.IsFlags() {
-// check if value zeroes out upper 32-bit of 64-bit register.
-// depth limits recursion depth. In AMD64.rules 3 is used as limit,
-// because it catches same amount of cases as 4.
-func zeroUpper32Bits(x *Value, depth int) bool {
+// ZeroUpper32Bits reports whether x zeroes the upper 32 bits of a
+// 64-bit register. depth limits the recursion; AMD64.rules uses 3,
+// which catches as many cases as 4 does.
+func ZeroUpper32Bits(x *Value, depth int) bool {
if x.Type.IsSigned() && x.Type.Size() < 8 {
// If the value is signed, it might get re-sign-extended
// during spill and restore. See issue 68227.
OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
OpAMD64SHLL, OpAMD64SHLLconst:
return true
+ case OpAMD64MOVQconst:
+ return uint64(uint32(x.AuxInt)) == uint64(x.AuxInt)
	case OpARM64REV16W, OpARM64REVW, OpARM64RBITW, OpARM64CLZW, OpARM64EXTRWconst,
		OpARM64MULW, OpARM64MNEGW, OpARM64UDIVW, OpARM64DIVW, OpARM64UMODW,
		OpARM64MADDW, OpARM64MSUBW, OpARM64RORW, OpARM64RORWconst:
		// 32-bit arm64 instructions zero the upper 32 bits of the destination.
		return true
	case OpPhi, OpSelect0, OpSelect1:
		// Phis can use each other as arguments; instead of tracking
		// visited values, just limit the recursion depth.
		if depth <= 0 {
			return false
		}
		for i := range x.Args {
-			if !zeroUpper32Bits(x.Args[i], depth-1) {
+			if !ZeroUpper32Bits(x.Args[i], depth-1) {
				return false
			}
		}
		return true
	}
	return false
}
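
The new MOVQconst arm is a round-trip test: truncating AuxInt to 32 bits and widening it back is lossless exactly when bits 63:32 are zero; a negative constant fails because sign extension fills the upper half. A standalone illustration (hypothetical helper name, not CL code):

	func upper32Zero(aux int64) bool {
		return uint64(uint32(aux)) == uint64(aux)
	}

	// upper32Zero(0xFFFFFFFF)   == true  (fits in 32 bits)
	// upper32Zero(0x1_00000000) == false (bit 32 is set)
	// upper32Zero(-1)           == false (sign bits fill 63:32)
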
-// zeroUpper48Bits is similar to zeroUpper32Bits, but for upper 48 bits.
-func zeroUpper48Bits(x *Value, depth int) bool {
+// ZeroUpper48Bits is similar to ZeroUpper32Bits, but for upper 48 bits.
+func ZeroUpper48Bits(x *Value, depth int) bool {
if x.Type.IsSigned() && x.Type.Size() < 8 {
return false
}
switch x.Op {
case OpAMD64MOVWQZX, OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVWloadidx2:
return true
+ case OpAMD64MOVQconst, OpAMD64MOVLconst:
+ return uint64(uint16(x.AuxInt)) == uint64(x.AuxInt)
case OpArg: // note: but not ArgIntReg
return x.Type.Size() == 2 && x.Block.Func.Config.arch == "amd64"
	case OpPhi, OpSelect0, OpSelect1:
		// Phis can use each other as arguments; instead of tracking
		// visited values, just limit the recursion depth.
		if depth <= 0 {
			return false
		}
		for i := range x.Args {
-			if !zeroUpper48Bits(x.Args[i], depth-1) {
+			if !ZeroUpper48Bits(x.Args[i], depth-1) {
				return false
			}
		}
		return true
	}
	return false
}
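
ZeroUpper48Bits and ZeroUpper56Bits apply the same round-trip idea with narrower truncations; sketched standalone (hypothetical names, not CL code):

	func upper48Zero(aux int64) bool { return uint64(uint16(aux)) == uint64(aux) }
	func upper56Zero(aux int64) bool { return uint64(uint8(aux)) == uint64(aux) }

For example, upper48Zero(0xFFFF) is true, while upper48Zero(0x10000) is false.
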
-// zeroUpper56Bits is similar to zeroUpper32Bits, but for upper 56 bits.
-func zeroUpper56Bits(x *Value, depth int) bool {
+// ZeroUpper56Bits is similar to ZeroUpper32Bits, but for upper 56 bits.
+func ZeroUpper56Bits(x *Value, depth int) bool {
if x.Type.IsSigned() && x.Type.Size() < 8 {
return false
}
switch x.Op {
case OpAMD64MOVBQZX, OpAMD64MOVBload, OpAMD64MOVBloadidx1:
return true
+ case OpAMD64MOVQconst, OpAMD64MOVLconst:
+ return uint64(uint8(x.AuxInt)) == uint64(x.AuxInt)
case OpArg: // note: but not ArgIntReg
return x.Type.Size() == 1 && x.Block.Func.Config.arch == "amd64"
	case OpPhi, OpSelect0, OpSelect1:
		// Phis can use each other as arguments; instead of tracking
		// visited values, just limit the recursion depth.
		if depth <= 0 {
			return false
		}
		for i := range x.Args {
-			if !zeroUpper56Bits(x.Args[i], depth-1) {
+			if !ZeroUpper56Bits(x.Args[i], depth-1) {
				return false
			}
		}
		return true
	}
	return false
}
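
In all three helpers the depth parameter stands in for a visited set: phis in a loop can use each other as arguments, and bounding the recursion guarantees termination. A toy model of that walk (hypothetical node type, not compiler code):

	type node struct {
		known bool    // certainly zeroes the upper bits
		args  []*node // phi inputs; may be cyclic across a loop edge
	}

	func zeroes(n *node, depth int) bool {
		if n.known {
			return true
		}
		if depth <= 0 {
			return false // conservative answer instead of recursing forever
		}
		for _, a := range n.args {
			if !zeroes(a, depth-1) {
				return false
			}
		}
		return len(n.args) > 0 // a phi is clean only if every input is
	}
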
func rewriteValueAMD64latelower_OpAMD64MOVBQZX(v *Value) bool {
v_0 := v.Args[0]
// match: (MOVBQZX x)
- // cond: zeroUpper56Bits(x,3)
+ // cond: ZeroUpper56Bits(x,3)
// result: x
for {
x := v_0
- if !(zeroUpper56Bits(x, 3)) {
+ if !(ZeroUpper56Bits(x, 3)) {
break
}
v.copyOf(x)
func rewriteValueAMD64latelower_OpAMD64MOVLQZX(v *Value) bool {
v_0 := v.Args[0]
// match: (MOVLQZX x)
- // cond: zeroUpper32Bits(x,3)
+ // cond: ZeroUpper32Bits(x,3)
// result: x
for {
x := v_0
- if !(zeroUpper32Bits(x, 3)) {
+ if !(ZeroUpper32Bits(x, 3)) {
break
}
v.copyOf(x)
func rewriteValueAMD64latelower_OpAMD64MOVWQZX(v *Value) bool {
v_0 := v.Args[0]
// match: (MOVWQZX x)
- // cond: zeroUpper48Bits(x,3)
+ // cond: ZeroUpper48Bits(x,3)
// result: x
for {
x := v_0
- if !(zeroUpper48Bits(x, 3)) {
+ if !(ZeroUpper48Bits(x, 3)) {
break
}
v.copyOf(x)
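
The net effect of these rules is to delete zero-extensions the compiler can prove redundant. An assumed example of Go code that benefits (not taken from the CL):

	// Each iteration loads one byte; the load itself zeroes bits 63:8
	// of the destination register, so with the MOVBQZX rule above the
	// uint64 conversion compiles to no extra instruction.
	func sum(b []byte) uint64 {
		var s uint64
		for _, c := range b {
			s += uint64(c)
		}
		return s
	}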