From: Meng Zhuo Date: Thu, 7 May 2020 10:08:35 +0000 (+0800) Subject: cmd/compile: finish adjusting MIPS64x rewrite rules to use typed aux fields X-Git-Tag: go1.16beta1~1239 X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=4e4d5df0b0af23d2fcb5690e89f27bd7e64e48f1;p=gostls13.git cmd/compile: finish adjusting MIPS64x rewrite rules to use typed aux fields Follow-up to CL 228937. All MIPS64x rewrite rules have been converted to =>. toolstash-check passed for b1b67841d1..f2ad426737 Change-Id: I7eb7541ae1b86a005770003b61c555672660d2e5 Reviewed-on: https://go-review.googlesource.com/c/go/+/230778 Reviewed-by: Keith Randall Run-TryBot: Meng Zhuo TryBot-Result: Gobot Gobot --- diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules index 9d319e0df2..e008ec8703 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules +++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules @@ -11,8 +11,8 @@ (Mul(64|32|16|8) x y) => (Select1 (MULVU x y)) (Mul(32|64)F ...) => (MUL(F|D) ...) (Mul64uhilo ...) => (MULVU ...) -(Select0 (Mul64uover x y)) -> (Select1 (MULVU x y)) -(Select1 (Mul64uover x y)) -> (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) +(Select0 (Mul64uover x y)) => (Select1 (MULVU x y)) +(Select1 (Mul64uover x y)) => (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) (Hmul64 x y) => (Select0 (MULV x y)) (Hmul64u x y) => (Select0 (MULVU x y)) @@ -38,8 +38,8 @@ (Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) (Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) -// (x + y) / 2 with x>=y -> (x - y) / 2 + y -(Avg64u x y) -> (ADDV (SRLVconst (SUBV x y) [1]) y) +// (x + y) / 2 with x>=y => (x - y) / 2 + y +(Avg64u x y) => (ADDV (SRLVconst (SUBV x y) [1]) y) (And(64|32|16|8) ...) => (AND ...) (Or(64|32|16|8) ...) => (OR ...) @@ -130,10 +130,10 @@ (Not x) => (XORconst [1] x) // constants -(Const(64|32|16|8) ...) -> (MOVVconst ...) -(Const(32|64)F ...) -> (MOV(F|D)const ...) +(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)]) +(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)]) (ConstNil) => (MOVVconst [0]) -(ConstBool ...) -> (MOVVconst ...) +(ConstBool [b]) => (MOVVconst [int64(b2i(b))]) (Slicemask x) => (SRAVconst (NEGV x) [63]) @@ -161,7 +161,7 @@ (SignExt16to64 ...) => (MOVHreg ...) (SignExt32to64 ...) => (MOVWreg ...) -// float <-> int conversion +// float <=> int conversion (Cvt32to32F ...) => (MOVWF ...) (Cvt32to64F ...) => (MOVWD ...) (Cvt64to32F ...) => (MOVVF ...) @@ -214,11 +214,11 @@ (Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) (Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y)) -(OffPtr [off] ptr:(SP)) -> (MOVVaddr [off] ptr) -(OffPtr [off] ptr) -> (ADDVconst [off] ptr) +(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVVaddr [int32(off)] ptr) +(OffPtr [off] ptr) => (ADDVconst [off] ptr) -(Addr ...) -> (MOVVaddr ...) -(LocalAddr {sym} base _) -> (MOVVaddr {sym} base) +(Addr {sym} base) => (MOVVaddr {sym} base) +(LocalAddr {sym} base _) => (MOVVaddr {sym} base) // loads (Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) @@ -380,24 +380,17 @@ (InterCall ...) => (CALLinter ...) // atomic intrinsics -(AtomicLoad8 ...) -> (LoweredAtomicLoad8 ...) -(AtomicLoad32 ...) -> (LoweredAtomicLoad32 ...) -(AtomicLoad64 ...) -> (LoweredAtomicLoad64 ...) -(AtomicLoadPtr ...) -> (LoweredAtomicLoad64 ...) +(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...) +(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...) -(AtomicStore8 ...) -> (LoweredAtomicStore8 ...) -(AtomicStore32 ...) 
-> (LoweredAtomicStore32 ...) -(AtomicStore64 ...) -> (LoweredAtomicStore64 ...) -(AtomicStorePtrNoWB ...) -> (LoweredAtomicStore64 ...) +(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...) +(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...) -(AtomicExchange32 ...) -> (LoweredAtomicExchange32 ...) -(AtomicExchange64 ...) -> (LoweredAtomicExchange64 ...) +(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...) -(AtomicAdd32 ...) -> (LoweredAtomicAdd32 ...) -(AtomicAdd64 ...) -> (LoweredAtomicAdd64 ...) +(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...) -(AtomicCompareAndSwap32 ...) -> (LoweredAtomicCas32 ...) -(AtomicCompareAndSwap64 ...) -> (LoweredAtomicCas64 ...) +(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...) // checks (NilCheck ...) => (LoweredNilCheck ...) @@ -444,69 +437,69 @@ (EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no) // fold offset into address -(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) -> (MOVVaddr [off1+off2] {sym} ptr) +(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) // fold address into load/store -(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem) -(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBUload [off1+off2] {sym} ptr mem) -(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHload [off1+off2] {sym} ptr mem) -(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHUload [off1+off2] {sym} ptr mem) -(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem) -(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWUload [off1+off2] {sym} ptr mem) -(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVVload [off1+off2] {sym} ptr mem) -(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVFload [off1+off2] {sym} ptr mem) -(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDload [off1+off2] {sym} ptr mem) - -(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem) -(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} ptr val mem) -(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem) -(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVVstore [off1+off2] {sym} ptr val mem) -(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVFstore [off1+off2] {sym} ptr val mem) -(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} ptr val mem) -(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBstorezero [off1+off2] {sym} ptr mem) -(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHstorezero [off1+off2] {sym} ptr mem) -(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWstorezero [off1+off2] {sym} ptr mem) -(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVVstorezero [off1+off2] {sym} ptr mem) - -(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) 
-> - (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVVload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) - -(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVVstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) -> - (MOVVstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBload [off1+int32(off2)] {sym} ptr mem) +(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBUload [off1+int32(off2)] {sym} ptr mem) +(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} ptr mem) +(MOVHUload [off1] {sym} 
(ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHUload [off1+int32(off2)] {sym} ptr mem) +(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} ptr mem) +(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWUload [off1+int32(off2)] {sym} ptr mem) +(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVload [off1+int32(off2)] {sym} ptr mem) +(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVFload [off1+int32(off2)] {sym} ptr mem) +(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} ptr mem) + +(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem) +(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem) +(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem) +(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem) +(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem) +(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem) +(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) + +(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVBload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVBUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVHload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVHUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVWload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVWUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVVload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && 
canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVFload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVDload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) + +(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVBstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem) +(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVHstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem) +(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVWstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem) +(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVVstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem) +(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVFstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem) +(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVDstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem) +(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVBstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVHstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVWstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVVstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) // store zero (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem) @@ -643,10 +636,9 @@ (MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))]) (MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))]) (MOVVreg (MOVVconst [c])) => (MOVVconst [c]) -(LoweredAtomicStore32 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero32 ptr mem) -(LoweredAtomicStore64 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero64 ptr mem) -(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst32 [c] ptr mem) -(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst64 [c] ptr mem) +(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem) +(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem) +(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem) // constant comparisons (SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1]) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index ae7f496657..dfff1c03b7 100644 --- 
a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -29,8 +29,7 @@ func rewriteValueMIPS64(v *Value) bool { v.Op = OpMIPS64ADDV return true case OpAddr: - v.Op = OpMIPS64MOVVaddr - return true + return rewriteValueMIPS64_OpAddr(v) case OpAnd16: v.Op = OpMIPS64AND return true @@ -102,26 +101,19 @@ func rewriteValueMIPS64(v *Value) bool { case OpCom8: return rewriteValueMIPS64_OpCom8(v) case OpConst16: - v.Op = OpMIPS64MOVVconst - return true + return rewriteValueMIPS64_OpConst16(v) case OpConst32: - v.Op = OpMIPS64MOVVconst - return true + return rewriteValueMIPS64_OpConst32(v) case OpConst32F: - v.Op = OpMIPS64MOVFconst - return true + return rewriteValueMIPS64_OpConst32F(v) case OpConst64: - v.Op = OpMIPS64MOVVconst - return true + return rewriteValueMIPS64_OpConst64(v) case OpConst64F: - v.Op = OpMIPS64MOVDconst - return true + return rewriteValueMIPS64_OpConst64F(v) case OpConst8: - v.Op = OpMIPS64MOVVconst - return true + return rewriteValueMIPS64_OpConst8(v) case OpConstBool: - v.Op = OpMIPS64MOVVconst - return true + return rewriteValueMIPS64_OpConstBool(v) case OpConstNil: return rewriteValueMIPS64_OpConstNil(v) case OpCvt32Fto32: @@ -684,6 +676,19 @@ func rewriteValueMIPS64(v *Value) bool { } return false } +func rewriteValueMIPS64_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (MOVVaddr {sym} base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpMIPS64MOVVaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} func rewriteValueMIPS64_OpAvg64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -696,7 +701,7 @@ func rewriteValueMIPS64_OpAvg64u(v *Value) bool { y := v_1 v.reset(OpMIPS64ADDV) v0 := b.NewValue0(v.Pos, OpMIPS64SRLVconst, t) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpMIPS64SUBV, t) v1.AddArg2(x, y) v0.AddArg(v1) @@ -764,6 +769,76 @@ func rewriteValueMIPS64_OpCom8(v *Value) bool { return true } } +func rewriteValueMIPS64_OpConst16(v *Value) bool { + // match: (Const16 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt16(v.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueMIPS64_OpConst32(v *Value) bool { + // match: (Const32 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt32(v.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueMIPS64_OpConst32F(v *Value) bool { + // match: (Const32F [val]) + // result: (MOVFconst [float64(val)]) + for { + val := auxIntToFloat32(v.AuxInt) + v.reset(OpMIPS64MOVFconst) + v.AuxInt = float64ToAuxInt(float64(val)) + return true + } +} +func rewriteValueMIPS64_OpConst64(v *Value) bool { + // match: (Const64 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt64(v.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueMIPS64_OpConst64F(v *Value) bool { + // match: (Const64F [val]) + // result: (MOVDconst [float64(val)]) + for { + val := auxIntToFloat64(v.AuxInt) + v.reset(OpMIPS64MOVDconst) + v.AuxInt = float64ToAuxInt(float64(val)) + return true + } +} +func rewriteValueMIPS64_OpConst8(v *Value) bool { + // match: (Const8 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt8(v.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueMIPS64_OpConstBool(v *Value) bool { + 
// match: (ConstBool [b]) + // result: (MOVVconst [int64(b2i(b))]) + for { + b := auxIntToBool(v.AuxInt) + v.reset(OpMIPS64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(b2i(b))) + return true + } +} func rewriteValueMIPS64_OpConstNil(v *Value) bool { // match: (ConstNil) // result: (MOVVconst [0]) @@ -1750,10 +1825,10 @@ func rewriteValueMIPS64_OpLocalAddr(v *Value) bool { // match: (LocalAddr {sym} base _) // result: (MOVVaddr {sym} base) for { - sym := v.Aux + sym := auxToSym(v.Aux) base := v_0 v.reset(OpMIPS64MOVVaddr) - v.Aux = sym + v.Aux = symToAux(sym) v.AddArg(base) return true } @@ -2209,18 +2284,22 @@ func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool { func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool { v_0 := v.Args[0] // match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) - // result: (MOVVaddr [off1+off2] {sym} ptr) + // cond: is32Bit(off1+int64(off2)) + // result: (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) for { - off1 := v.AuxInt + off1 := auxIntToInt64(v.AuxInt) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) ptr := v_0.Args[0] + if !(is32Bit(off1 + int64(off2))) { + break + } v.reset(OpMIPS64MOVVaddr) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(int32(off1) + int32(off2)) + v.Aux = symToAux(sym) v.AddArg(ptr) return true } @@ -2375,19 +2454,19 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32(v *Value) bool { v_0 := v.Args[0] // match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) // cond: is32Bit(c) - // result: (LoweredAtomicAddconst32 [c] ptr mem) + // result: (LoweredAtomicAddconst32 [int32(c)] ptr mem) for { ptr := v_0 if v_1.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c)) { break } v.reset(OpMIPS64LoweredAtomicAddconst32) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } @@ -2405,13 +2484,13 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64(v *Value) bool { if v_1.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c)) { break } v.reset(OpMIPS64LoweredAtomicAddconst64) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg2(ptr, mem) return true } @@ -2425,7 +2504,7 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32(v *Value) bool { // result: (LoweredAtomicStorezero32 ptr mem) for { ptr := v_0 - if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 { + if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { break } mem := v_2 @@ -2443,7 +2522,7 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v *Value) bool { // result: (LoweredAtomicStorezero64 ptr mem) for { ptr := v_0 - if v_1.Op != OpMIPS64MOVVconst || v_1.AuxInt != 0 { + if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { break } mem := v_2 @@ -2457,45 +2536,45 @@ func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVBUload [off1+off2] {sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } 
v.reset(OpMIPS64MOVBUload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVBUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVBUload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -2542,45 +2621,45 @@ func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVBload [off1+off2] {sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVBload [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVBload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVBload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVBload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -2628,47 +2707,47 @@ func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVBstore [off1+off2] {sym} ptr val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if 
!(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVBstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVBstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVBstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } @@ -2796,45 +2875,45 @@ func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVBstorezero [off1+off2] {sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVBstorezero) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVBstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVBstorezero) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -2844,45 +2923,45 @@ func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVDload [off1+off2] {sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVDload [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := 
auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVDload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVDload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVDload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -2893,47 +2972,47 @@ func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVDstore [off1+off2] {sym} ptr val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVDstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVDstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVDstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } @@ -2943,45 +3022,45 @@ func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVFload [off1+off2] {sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVFload [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) 
if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVFload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVFload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVFload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -2992,47 +3071,47 @@ func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVFstore [off1+off2] {sym} ptr val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVFstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVFstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVFstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } @@ -3042,45 +3121,45 @@ func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVHUload [off1+off2] {sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt 
- sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVHUload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVHUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVHUload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -3149,45 +3228,45 @@ func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVHload [off1+off2] {sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVHload [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVHload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVHload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVHload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -3279,47 +3358,47 @@ func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVHstore [off1+off2] {sym} ptr val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVHstore [off1+int32(off2)] {sym} ptr val 
mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVHstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVHstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVHstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } @@ -3413,45 +3492,45 @@ func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVHstorezero [off1+off2] {sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVHstorezero) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVHstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVHstorezero) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -3461,45 +3540,45 @@ func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVVload [off1+off2] 
{sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVVload [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVVload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVVload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVVload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVVload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -3537,47 +3616,47 @@ func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVVstore [off1+off2] {sym} ptr val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVVstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVVstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVVstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVVstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } @@ -3603,45 +3682,45 @@ func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVVstorezero [off1] {sym} (ADDVconst 
[off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVVstorezero [off1+off2] {sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVVstorezero) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVVstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVVstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVVstorezero) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -3651,45 +3730,45 @@ func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVWUload [off1+off2] {sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVWUload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVWUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVWUload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -3780,45 +3859,45 @@ func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool { v_1 := v.Args[1] v_0 := 
v.Args[0] // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVWload [off1+off2] {sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVWload [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVWload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -3943,47 +4022,47 @@ func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVWstore [off1+off2] {sym} ptr val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVWstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVWstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVWstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } @@ -4043,45 +4122,45 @@ func 
rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVWstorezero [off1+off2] {sym} ptr mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpMIPS64ADDVconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + off2)) { break } v.reset(OpMIPS64MOVWstorezero) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) - // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVWstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpMIPS64MOVVaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpMIPS64MOVWstorezero) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -5697,25 +5776,26 @@ func rewriteValueMIPS64_OpNot(v *Value) bool { func rewriteValueMIPS64_OpOffPtr(v *Value) bool { v_0 := v.Args[0] // match: (OffPtr [off] ptr:(SP)) - // result: (MOVVaddr [off] ptr) + // cond: is32Bit(off) + // result: (MOVVaddr [int32(off)] ptr) for { - off := v.AuxInt + off := auxIntToInt64(v.AuxInt) ptr := v_0 - if ptr.Op != OpSP { + if ptr.Op != OpSP || !(is32Bit(off)) { break } v.reset(OpMIPS64MOVVaddr) - v.AuxInt = off + v.AuxInt = int32ToAuxInt(int32(off)) v.AddArg(ptr) return true } // match: (OffPtr [off] ptr) // result: (ADDVconst [off] ptr) for { - off := v.AuxInt + off := auxIntToInt64(v.AuxInt) ptr := v_0 v.reset(OpMIPS64ADDVconst) - v.AuxInt = off + v.AuxInt = int64ToAuxInt(off) v.AddArg(ptr) return true } @@ -6869,7 +6949,7 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v1.AddArg2(x, y) v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) - v2.AuxInt = 0 + v2.AuxInt = int64ToAuxInt(0) v.AddArg2(v0, v2) return true }
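
For context on what the "->" to "=>" conversion changes: with typed rules, the generated code reads and writes aux fields through small typed helpers (auxIntToInt32, int32ToAuxInt, auxToSym, symToAux, b2i above) instead of treating v.AuxInt and v.Aux as untyped int64 and interface{} values, which is why folding a 64-bit ADDVconst offset into a 32-bit load/store offset now carries an explicit is32Bit(int64(off1)+off2) condition and an int32 conversion in the result. The following is a minimal, self-contained sketch of that pattern, assuming helper shapes consistent with how they are used in this diff; the value and localSym types are invented for illustration, and this is not the compiler's actual code.

// Illustrative sketch (not the compiler's source): how typed-aux helpers
// bridge between an untyped int64 AuxInt field and the typed values that
// "=>" rules operate on. Helper names mirror those used in the diff above.
package main

import "fmt"

// value stands in for an SSA value's aux storage: a single int64 plus an
// opaque Aux, regardless of the operation's logical aux type.
type value struct {
	AuxInt int64
	Aux    interface{}
}

// sym stands in for the interface implemented by symbol aux values.
type sym interface{ String() string }

type localSym string

func (s localSym) String() string { return string(s) }

// Conversion helpers in the style of auxIntToInt32 / int32ToAuxInt etc.
func auxIntToInt32(i int64) int32 { return int32(i) }
func int32ToAuxInt(i int32) int64 { return int64(i) }
func auxIntToInt64(i int64) int64 { return i }
func int64ToAuxInt(i int64) int64 { return i }
func auxToSym(a interface{}) sym  { s, _ := a.(sym); return s }
func symToAux(s sym) interface{}  { return s }

// b2i maps a bool to 0/1, as used by the ConstBool rule above.
func b2i(b bool) int64 {
	if b {
		return 1
	}
	return 0
}

// is32Bit reports whether n fits in a signed 32-bit integer, the guard the
// typed rules add before folding a 64-bit ADDVconst offset into a 32-bit
// load/store offset.
func is32Bit(n int64) bool { return n == int64(int32(n)) }

func main() {
	// A MOVWload carries an int32 offset; an ADDVconst carries an int64 one.
	load := &value{AuxInt: int32ToAuxInt(8), Aux: symToAux(localSym("x"))}
	add := &value{AuxInt: int64ToAuxInt(1 << 20)}

	off1 := auxIntToInt32(load.AuxInt)
	off2 := auxIntToInt64(add.AuxInt)

	// Mirrors: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem)
	//          && is32Bit(int64(off1)+off2)
	//          => (MOVWload [off1+int32(off2)] {sym} ptr mem)
	if is32Bit(int64(off1) + off2) {
		load.AuxInt = int32ToAuxInt(off1 + int32(off2))
	}
	fmt.Println(auxIntToInt32(load.AuxInt), auxToSym(load.Aux), b2i(true))
}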