For c + nil, we want the result to still be of pointer type.
Fixes ppc64le build failure with CL 468455, in issue33724.go.
The problem in that test is that it requires a nil check to be
scheduled before the corresponding load. This normally happens fine
because we prioritize nil checks. If we have nilcheck(p) and load(p),
once p is scheduled the nil check will always go before the load.
The issue we saw in #33724 is that when p is a nil pointer, we ended up
with two different p's, an int64(0) as the argument to the nil check
and an (*Outer)(0) as the argument to the load. Those two zeroes don't
get CSEd, so if the (*Outer)(0) happens to get scheduled first, the
load can end up before the nilcheck.
Fix this by always having constant arithmetic preserve the pointerness
of the value, so that both zeroes are of type *Outer and get CSEd.
Update #58482
Update #33724
Change-Id: Ib9b8c0446f1690b574e0f3c0afb9934efbaf3513
Reviewed-on: https://go-review.googlesource.com/c/go/+/468615
Reviewed-by: Keith Randall <khr@google.com>
Run-TryBot: Keith Randall <khr@golang.org>
Reviewed-by: David Chase <drchase@google.com>
TryBot-Bypass: Keith Randall <khr@golang.org>
(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no)
// fold constants into instructions
-(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)
+(ADDL x (MOVLconst <t> [c])) && !t.IsPtr() => (ADDLconst [c] x)
(ADDLcarry x (MOVLconst [c])) => (ADDLconstcarry [c] x)
(ADCL x (MOVLconst [c]) f) => (ADCLconst [c] x f)
// (SETEQF x) => (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x))
// fold constants into instructions
-(ADDQ x (MOVQconst [c])) && is32Bit(c) => (ADDQconst [int32(c)] x)
+(ADDQ x (MOVQconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDQconst [int32(c)] x)
(ADDQ x (MOVLconst [c])) => (ADDQconst [c] x)
(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)
(MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVHreg x)
// fold constant into arithmetic ops
-(ADD x (MOVWconst [c])) => (ADDconst [c] x)
+(ADD x (MOVWconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
(SUB (MOVWconst [c]) x) => (RSBconst [c] x)
(SUB x (MOVWconst [c])) => (SUBconst [c] x)
(RSB (MOVWconst [c]) x) => (SUBconst [c] x)
(MOVDnop (MOVDconst [c])) => (MOVDconst [c])
// fold constant into arithmetic ops
-(ADD x (MOVDconst [c])) => (ADDconst [c] x)
+(ADD x (MOVDconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
(SUB x (MOVDconst [c])) => (SUBconst [c] x)
(AND x (MOVDconst [c])) => (ANDconst [c] x)
(OR x (MOVDconst [c])) => (ORconst [c] x)
(MOVVreg x) && x.Uses == 1 => (MOVVnop x)
// fold constant into arithmetic ops
-(ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x)
+(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
(MOVWnop (MOVWconst [c])) => (MOVWconst [c])
// fold constant into arithmetic ops
-(ADD x (MOVWconst [c])) => (ADDconst [c] x)
+(ADD x (MOVWconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
(SUB x (MOVWconst [c])) => (SUBconst [c] x)
(AND x (MOVWconst [c])) => (ANDconst [c] x)
(OR x (MOVWconst [c])) => (ORconst [c] x)
(MOVVnop (MOVVconst [c])) => (MOVVconst [c])
// fold constant into arithmetic ops
-(ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x)
+(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
// Arithmetic constant ops
-(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [c] x)
+(ADD x (MOVDconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDconst [c] x)
(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x)
(ADDconst [0] x) => x
(SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x)
(MOVDnop (MOVDconst [c])) => (MOVDconst [c])
// Fold constant into immediate instructions where possible.
-(ADD (MOVDconst [val]) x) && is32Bit(val) => (ADDI [val] x)
+(ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
(BRC {c} (CMPWUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CIJ {c} x [ int8(y)] yes no)
// Fold constants into instructions.
-(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [int32(c)] x)
+(ADD x (MOVDconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDconst [int32(c)] x)
(ADDW x (MOVDconst [c])) => (ADDWconst [int32(c)] x)
(SUB x (MOVDconst [c])) && is32Bit(c) => (SUBconst x [int32(c)])
(I64LeU (I64Const [1]) x) => (I64Eqz (I64Eqz x))
(I64Ne x (I64Const [0])) => (I64Eqz (I64Eqz x))
-(I64Add x (I64Const [y])) => (I64AddConst [y] x)
+(I64Add x (I64Const <t> [y])) && !t.IsPtr() => (I64AddConst [y] x)
(I64AddConst [0] x) => x
(I64Eqz (I64Eqz (I64Eqz x))) => (I64Eqz x)
func rewriteValue386_Op386ADDL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (ADDL x (MOVLconst [c]))
+ // match: (ADDL x (MOVLconst <t> [c]))
+ // cond: !t.IsPtr()
// result: (ADDLconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_1.Op != Op386MOVLconst {
continue
}
+ t := v_1.Type
c := auxIntToInt32(v_1.AuxInt)
+ if !(!t.IsPtr()) {
+ continue
+ }
v.reset(Op386ADDLconst)
v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (ADDQ x (MOVQconst [c]))
- // cond: is32Bit(c)
+ // match: (ADDQ x (MOVQconst <t> [c]))
+ // cond: is32Bit(c) && !t.IsPtr()
// result: (ADDQconst [int32(c)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_1.Op != OpAMD64MOVQconst {
continue
}
+ t := v_1.Type
c := auxIntToInt64(v_1.AuxInt)
- if !(is32Bit(c)) {
+ if !(is32Bit(c) && !t.IsPtr()) {
continue
}
v.reset(OpAMD64ADDQconst)
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (ADD x (MOVWconst [c]))
+ // match: (ADD x (MOVWconst <t> [c]))
+ // cond: !t.IsPtr()
// result: (ADDconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_1.Op != OpARMMOVWconst {
continue
}
+ t := v_1.Type
c := auxIntToInt32(v_1.AuxInt)
+ if !(!t.IsPtr()) {
+ continue
+ }
v.reset(OpARMADDconst)
v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
func rewriteValueARM64_OpARM64ADD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (ADD x (MOVDconst [c]))
+ // match: (ADD x (MOVDconst <t> [c]))
+ // cond: !t.IsPtr()
// result: (ADDconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_1.Op != OpARM64MOVDconst {
continue
}
+ t := v_1.Type
c := auxIntToInt64(v_1.AuxInt)
+ if !(!t.IsPtr()) {
+ continue
+ }
v.reset(OpARM64ADDconst)
v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
func rewriteValueLOONG64_OpLOONG64ADDV(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (ADDV x (MOVVconst [c]))
- // cond: is32Bit(c)
+ // match: (ADDV x (MOVVconst <t> [c]))
+ // cond: is32Bit(c) && !t.IsPtr()
// result: (ADDVconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_1.Op != OpLOONG64MOVVconst {
continue
}
+ t := v_1.Type
c := auxIntToInt64(v_1.AuxInt)
- if !(is32Bit(c)) {
+ if !(is32Bit(c) && !t.IsPtr()) {
continue
}
v.reset(OpLOONG64ADDVconst)
func rewriteValueMIPS_OpMIPSADD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (ADD x (MOVWconst [c]))
+ // match: (ADD x (MOVWconst <t> [c]))
+ // cond: !t.IsPtr()
// result: (ADDconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_1.Op != OpMIPSMOVWconst {
continue
}
+ t := v_1.Type
c := auxIntToInt32(v_1.AuxInt)
+ if !(!t.IsPtr()) {
+ continue
+ }
v.reset(OpMIPSADDconst)
v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (ADDV x (MOVVconst [c]))
- // cond: is32Bit(c)
+ // match: (ADDV x (MOVVconst <t> [c]))
+ // cond: is32Bit(c) && !t.IsPtr()
// result: (ADDVconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_1.Op != OpMIPS64MOVVconst {
continue
}
+ t := v_1.Type
c := auxIntToInt64(v_1.AuxInt)
- if !(is32Bit(c)) {
+ if !(is32Bit(c) && !t.IsPtr()) {
continue
}
v.reset(OpMIPS64ADDVconst)
}
break
}
- // match: (ADD x (MOVDconst [c]))
- // cond: is32Bit(c)
+ // match: (ADD x (MOVDconst <t> [c]))
+ // cond: is32Bit(c) && !t.IsPtr()
// result: (ADDconst [c] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_1.Op != OpPPC64MOVDconst {
continue
}
+ t := v_1.Type
c := auxIntToInt64(v_1.AuxInt)
- if !(is32Bit(c)) {
+ if !(is32Bit(c) && !t.IsPtr()) {
continue
}
v.reset(OpPPC64ADDconst)
func rewriteValueRISCV64_OpRISCV64ADD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (ADD (MOVDconst [val]) x)
- // cond: is32Bit(val)
+ // match: (ADD (MOVDconst <t> [val]) x)
+ // cond: is32Bit(val) && !t.IsPtr()
// result: (ADDI [val] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpRISCV64MOVDconst {
continue
}
+ t := v_0.Type
val := auxIntToInt64(v_0.AuxInt)
x := v_1
- if !(is32Bit(val)) {
+ if !(is32Bit(val) && !t.IsPtr()) {
continue
}
v.reset(OpRISCV64ADDI)
func rewriteValueS390X_OpS390XADD(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (ADD x (MOVDconst [c]))
- // cond: is32Bit(c)
+ // match: (ADD x (MOVDconst <t> [c]))
+ // cond: is32Bit(c) && !t.IsPtr()
// result: (ADDconst [int32(c)] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_1.Op != OpS390XMOVDconst {
continue
}
+ t := v_1.Type
c := auxIntToInt64(v_1.AuxInt)
- if !(is32Bit(c)) {
+ if !(is32Bit(c) && !t.IsPtr()) {
continue
}
v.reset(OpS390XADDconst)
v.AddArg2(y, v0)
return true
}
- // match: (I64Add x (I64Const [y]))
+ // match: (I64Add x (I64Const <t> [y]))
+ // cond: !t.IsPtr()
// result: (I64AddConst [y] x)
for {
x := v_0
if v_1.Op != OpWasmI64Const {
break
}
+ t := v_1.Type
y := auxIntToInt64(v_1.AuxInt)
+ if !(!t.IsPtr()) {
+ break
+ }
v.reset(OpWasmI64AddConst)
v.AuxInt = int64ToAuxInt(y)
v.AddArg(x)