From f9cf2c4d0424e352f30d50b89d50eafbfb6fc019 Mon Sep 17 00:00:00 2001
From: David Chase
Date: Wed, 5 Apr 2023 15:49:07 -0400
Subject: [PATCH] cmd/compile: use correct type in amd64 late-lower rules

The wrong type causes a wrong-width spill, which corrupts the value.
I tried to write a test for this and did not succeed, but was able
(using gossahash and ssa.html) to isolate the exact change and spill.

Fixes #59432.

Change-Id: I85ad82c9f8fed7674c69d6a2b0a62e111f690454
Reviewed-on: https://go-review.googlesource.com/c/go/+/482536
Reviewed-by: Cherry Mui
Run-TryBot: David Chase
TryBot-Result: Gopher Robot
---
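Illustrative note (not part of the original change): the toy Go program below
is a minimal sketch, under the assumptions of the commit message, of why the
type matters. The diff switches the type of the new ADDQconst/ADDLconst (and
inner LEA) values from an operand's type to the LEA's own result type; if the
value carries a narrower type, a later spill can be sized from that wrong type
and the reloaded value may be corrupted (for example, truncated).

	package main

	import "fmt"

	// Toy model of a wrong-width spill: a 64-bit value pushed through a
	// 4-byte slot loses its upper 32 bits on reload. This mimics the
	// failure mode only; it is not the compiler's code.
	func main() {
		x := uint64(1) << 40                 // 64-bit value produced by the rewritten add
		var slot uint32                      // "spill slot" sized from the wrong (4-byte) type
		slot = uint32(x)                     // wrong-width "spill" truncates
		reloaded := uint64(slot)             // "reload" no longer equals x
		fmt.Println(x == reloaded, reloaded) // false 0
	}
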
 .../internal/ssa/_gen/AMD64latelower.rules    | 10 +--
 .../internal/ssa/rewriteAMD64latelower.go     | 66 ++++++++++---------
 2 files changed, 41 insertions(+), 35 deletions(-)

diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64latelower.rules b/src/cmd/compile/internal/ssa/_gen/AMD64latelower.rules
index bcf453128a..3cac5597f1 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64latelower.rules
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64latelower.rules
@@ -4,11 +4,11 @@
 
 // split 3 operand LEA.
 // Note: Don't split pointer computations in order to avoid invalid pointers.
-(LEA(Q|L|W)1 [c] {s} x y) && isPtr(x.Type) && c != 0 && s == nil => (ADD(Q|L|L) x (ADD(Q|L|L)const <y.Type> [c] y))
-(LEA(Q|L|W)1 [c] {s} x y) && !isPtr(x.Type) && c != 0 && s == nil => (ADD(Q|L|L) y (ADD(Q|L|L)const <x.Type> [c] x))
-(LEA(Q|L|W)2 <t> [c] {s} x y) && !isPtr(t) && c != 0 && s == nil => (ADD(Q|L|L)const [c] (LEA(Q|L|W)2 <x.Type> x y))
-(LEA(Q|L|W)4 <t> [c] {s} x y) && !isPtr(t) && c != 0 && s == nil => (ADD(Q|L|L)const [c] (LEA(Q|L|W)4 <x.Type> x y))
-(LEA(Q|L|W)8 <t> [c] {s} x y) && !isPtr(t) && c != 0 && s == nil => (ADD(Q|L|L)const [c] (LEA(Q|L|W)8 <x.Type> x y))
+(LEA(Q|L|W)1 <t> [c] {s} x y) && isPtr(x.Type) && c != 0 && s == nil => (ADD(Q|L|L) x (ADD(Q|L|L)const <t> [c] y))
+(LEA(Q|L|W)1 <t> [c] {s} x y) && !isPtr(x.Type) && c != 0 && s == nil => (ADD(Q|L|L) y (ADD(Q|L|L)const <t> [c] x))
+(LEA(Q|L|W)2 <t> [c] {s} x y) && !isPtr(t) && c != 0 && s == nil => (ADD(Q|L|L)const [c] (LEA(Q|L|W)2 <t> x y))
+(LEA(Q|L|W)4 <t> [c] {s} x y) && !isPtr(t) && c != 0 && s == nil => (ADD(Q|L|L)const [c] (LEA(Q|L|W)4 <t> x y))
+(LEA(Q|L|W)8 <t> [c] {s} x y) && !isPtr(t) && c != 0 && s == nil => (ADD(Q|L|L)const [c] (LEA(Q|L|W)8 <t> x y))
 
 // Prefer SARX/SHLX/SHRX instruction because it has less register restriction on the shift input.
 (SAR(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SARX(Q|L) x y)
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64latelower.go b/src/cmd/compile/internal/ssa/rewriteAMD64latelower.go
index a6ba7d9e33..4068035241 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64latelower.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64latelower.go
@@ -51,8 +51,9 @@ func rewriteValueAMD64latelower_OpAMD64LEAL1(v *Value) bool {
 	b := v.Block
 	// match: (LEAL1 [c] {s} x y)
 	// cond: isPtr(x.Type) && c != 0 && s == nil
-	// result: (ADDL x (ADDLconst <y.Type> [c] y))
+	// result: (ADDL x (ADDLconst <t> [c] y))
 	for {
+		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
 		s := auxToSym(v.Aux)
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -62,7 +63,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAL1(v *Value) bool {
 				continue
 			}
 			v.reset(OpAMD64ADDL)
-			v0 := b.NewValue0(v.Pos, OpAMD64ADDLconst, y.Type)
+			v0 := b.NewValue0(v.Pos, OpAMD64ADDLconst, t)
 			v0.AuxInt = int32ToAuxInt(c)
 			v0.AddArg(y)
 			v.AddArg2(x, v0)
@@ -72,8 +73,9 @@ func rewriteValueAMD64latelower_OpAMD64LEAL1(v *Value) bool {
 	}
 	// match: (LEAL1 [c] {s} x y)
 	// cond: !isPtr(x.Type) && c != 0 && s == nil
-	// result: (ADDL y (ADDLconst <x.Type> [c] x))
+	// result: (ADDL y (ADDLconst <t> [c] x))
 	for {
+		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
 		s := auxToSym(v.Aux)
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -83,7 +85,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAL1(v *Value) bool {
 				continue
 			}
 			v.reset(OpAMD64ADDL)
-			v0 := b.NewValue0(v.Pos, OpAMD64ADDLconst, x.Type)
+			v0 := b.NewValue0(v.Pos, OpAMD64ADDLconst, t)
 			v0.AuxInt = int32ToAuxInt(c)
 			v0.AddArg(x)
 			v.AddArg2(y, v0)
@@ -99,7 +101,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAL2(v *Value) bool {
 	b := v.Block
 	// match: (LEAL2 [c] {s} x y)
 	// cond: !isPtr(t) && c != 0 && s == nil
-	// result: (ADDLconst [c] (LEAL2 <x.Type> x y))
+	// result: (ADDLconst [c] (LEAL2 <t> x y))
 	for {
 		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
@@ -111,7 +113,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAL2(v *Value) bool {
 		}
 		v.reset(OpAMD64ADDLconst)
 		v.AuxInt = int32ToAuxInt(c)
-		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, x.Type)
+		v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, t)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
@@ -124,7 +126,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAL4(v *Value) bool {
 	b := v.Block
 	// match: (LEAL4 [c] {s} x y)
 	// cond: !isPtr(t) && c != 0 && s == nil
-	// result: (ADDLconst [c] (LEAL4 <x.Type> x y))
+	// result: (ADDLconst [c] (LEAL4 <t> x y))
 	for {
 		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
@@ -136,7 +138,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAL4(v *Value) bool {
 		}
 		v.reset(OpAMD64ADDLconst)
 		v.AuxInt = int32ToAuxInt(c)
-		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, x.Type)
+		v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, t)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
@@ -149,7 +151,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAL8(v *Value) bool {
 	b := v.Block
 	// match: (LEAL8 [c] {s} x y)
 	// cond: !isPtr(t) && c != 0 && s == nil
-	// result: (ADDLconst [c] (LEAL8 <x.Type> x y))
+	// result: (ADDLconst [c] (LEAL8 <t> x y))
 	for {
 		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
@@ -161,7 +163,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAL8(v *Value) bool {
 		}
 		v.reset(OpAMD64ADDLconst)
 		v.AuxInt = int32ToAuxInt(c)
-		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, x.Type)
+		v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, t)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
@@ -174,8 +176,9 @@ func rewriteValueAMD64latelower_OpAMD64LEAQ1(v *Value) bool {
 	b := v.Block
 	// match: (LEAQ1 [c] {s} x y)
 	// cond: isPtr(x.Type) && c != 0 && s == nil
-	// result: (ADDQ x (ADDQconst <y.Type> [c] y))
+	// result: (ADDQ x (ADDQconst <t> [c] y))
 	for {
+		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
 		s := auxToSym(v.Aux)
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -185,7 +188,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAQ1(v *Value) bool {
 				continue
 			}
 			v.reset(OpAMD64ADDQ)
-			v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, y.Type)
+			v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
 			v0.AuxInt = int32ToAuxInt(c)
 			v0.AddArg(y)
 			v.AddArg2(x, v0)
@@ -195,8 +198,9 @@ func rewriteValueAMD64latelower_OpAMD64LEAQ1(v *Value) bool {
 	}
 	// match: (LEAQ1 [c] {s} x y)
 	// cond: !isPtr(x.Type) && c != 0 && s == nil
-	// result: (ADDQ y (ADDQconst <x.Type> [c] x))
+	// result: (ADDQ y (ADDQconst <t> [c] x))
 	for {
+		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
 		s := auxToSym(v.Aux)
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -206,7 +210,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAQ1(v *Value) bool {
 				continue
 			}
 			v.reset(OpAMD64ADDQ)
-			v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, x.Type)
+			v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
 			v0.AuxInt = int32ToAuxInt(c)
 			v0.AddArg(x)
 			v.AddArg2(y, v0)
@@ -222,7 +226,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAQ2(v *Value) bool {
 	b := v.Block
 	// match: (LEAQ2 [c] {s} x y)
 	// cond: !isPtr(t) && c != 0 && s == nil
-	// result: (ADDQconst [c] (LEAQ2 <x.Type> x y))
+	// result: (ADDQconst [c] (LEAQ2 <t> x y))
 	for {
 		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
@@ -234,7 +238,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAQ2(v *Value) bool {
 		}
 		v.reset(OpAMD64ADDQconst)
 		v.AuxInt = int32ToAuxInt(c)
-		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, x.Type)
+		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, t)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
@@ -247,7 +251,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAQ4(v *Value) bool {
 	b := v.Block
 	// match: (LEAQ4 [c] {s} x y)
 	// cond: !isPtr(t) && c != 0 && s == nil
-	// result: (ADDQconst [c] (LEAQ4 <x.Type> x y))
+	// result: (ADDQconst [c] (LEAQ4 <t> x y))
 	for {
 		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
@@ -259,7 +263,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAQ4(v *Value) bool {
 		}
 		v.reset(OpAMD64ADDQconst)
 		v.AuxInt = int32ToAuxInt(c)
-		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, x.Type)
+		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, t)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
@@ -272,7 +276,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAQ8(v *Value) bool {
 	b := v.Block
 	// match: (LEAQ8 [c] {s} x y)
 	// cond: !isPtr(t) && c != 0 && s == nil
-	// result: (ADDQconst [c] (LEAQ8 <x.Type> x y))
+	// result: (ADDQconst [c] (LEAQ8 <t> x y))
 	for {
 		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
@@ -284,7 +288,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAQ8(v *Value) bool {
 		}
 		v.reset(OpAMD64ADDQconst)
 		v.AuxInt = int32ToAuxInt(c)
-		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, x.Type)
+		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, t)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
@@ -297,8 +301,9 @@ func rewriteValueAMD64latelower_OpAMD64LEAW1(v *Value) bool {
 	b := v.Block
 	// match: (LEAW1 [c] {s} x y)
 	// cond: isPtr(x.Type) && c != 0 && s == nil
-	// result: (ADDL x (ADDLconst <y.Type> [c] y))
+	// result: (ADDL x (ADDLconst <t> [c] y))
 	for {
+		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
 		s := auxToSym(v.Aux)
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -308,7 +313,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAW1(v *Value) bool {
 				continue
 			}
 			v.reset(OpAMD64ADDL)
-			v0 := b.NewValue0(v.Pos, OpAMD64ADDLconst, y.Type)
+			v0 := b.NewValue0(v.Pos, OpAMD64ADDLconst, t)
 			v0.AuxInt = int32ToAuxInt(c)
 			v0.AddArg(y)
 			v.AddArg2(x, v0)
@@ -318,8 +323,9 @@ func rewriteValueAMD64latelower_OpAMD64LEAW1(v *Value) bool {
 	}
 	// match: (LEAW1 [c] {s} x y)
 	// cond: !isPtr(x.Type) && c != 0 && s == nil
-	// result: (ADDL y (ADDLconst <x.Type> [c] x))
+	// result: (ADDL y (ADDLconst <t> [c] x))
 	for {
+		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
 		s := auxToSym(v.Aux)
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -329,7 +335,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAW1(v *Value) bool {
 				continue
 			}
 			v.reset(OpAMD64ADDL)
-			v0 := b.NewValue0(v.Pos, OpAMD64ADDLconst, x.Type)
+			v0 := b.NewValue0(v.Pos, OpAMD64ADDLconst, t)
 			v0.AuxInt = int32ToAuxInt(c)
 			v0.AddArg(x)
 			v.AddArg2(y, v0)
@@ -345,7 +351,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAW2(v *Value) bool {
 	b := v.Block
 	// match: (LEAW2 [c] {s} x y)
 	// cond: !isPtr(t) && c != 0 && s == nil
-	// result: (ADDLconst [c] (LEAW2 <x.Type> x y))
+	// result: (ADDLconst [c] (LEAW2 <t> x y))
 	for {
 		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
@@ -357,7 +363,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAW2(v *Value) bool {
 		}
 		v.reset(OpAMD64ADDLconst)
 		v.AuxInt = int32ToAuxInt(c)
-		v0 := b.NewValue0(v.Pos, OpAMD64LEAW2, x.Type)
+		v0 := b.NewValue0(v.Pos, OpAMD64LEAW2, t)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
@@ -370,7 +376,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAW4(v *Value) bool {
 	b := v.Block
 	// match: (LEAW4 [c] {s} x y)
 	// cond: !isPtr(t) && c != 0 && s == nil
-	// result: (ADDLconst [c] (LEAW4 <x.Type> x y))
+	// result: (ADDLconst [c] (LEAW4 <t> x y))
 	for {
 		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
@@ -382,7 +388,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAW4(v *Value) bool {
 		}
 		v.reset(OpAMD64ADDLconst)
 		v.AuxInt = int32ToAuxInt(c)
-		v0 := b.NewValue0(v.Pos, OpAMD64LEAW4, x.Type)
+		v0 := b.NewValue0(v.Pos, OpAMD64LEAW4, t)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
@@ -395,7 +401,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAW8(v *Value) bool {
 	b := v.Block
 	// match: (LEAW8 [c] {s} x y)
 	// cond: !isPtr(t) && c != 0 && s == nil
-	// result: (ADDLconst [c] (LEAW8 <x.Type> x y))
+	// result: (ADDLconst [c] (LEAW8 <t> x y))
 	for {
 		t := v.Type
 		c := auxIntToInt32(v.AuxInt)
@@ -407,7 +413,7 @@ func rewriteValueAMD64latelower_OpAMD64LEAW8(v *Value) bool {
 		}
 		v.reset(OpAMD64ADDLconst)
 		v.AuxInt = int32ToAuxInt(c)
-		v0 := b.NewValue0(v.Pos, OpAMD64LEAW8, x.Type)
+		v0 := b.NewValue0(v.Pos, OpAMD64LEAW8, t)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
 		return true
-- 
2.50.0