From 81df5e69fc189af44459a4c6520b1c99d0210a92 Mon Sep 17 00:00:00 2001
From: Alberto Donizetti
Date: Wed, 22 Apr 2020 10:08:41 +0200
Subject: [PATCH] cmd/compile: switch to typed aux for mips lowering rules

This covers most of the lowering rules.

Passes

  GOARCH=mips gotip build -toolexec 'toolstash -cmp' -a std
  GOARCH=mipsle gotip build -toolexec 'toolstash -cmp' -a std

Change-Id: I9d00aaebecb36622e3bdaf556e5a9377670bf86b
Reviewed-on: https://go-review.googlesource.com/c/go/+/229102
Reviewed-by: Keith Randall
---
 src/cmd/compile/internal/ssa/gen/MIPS.rules | 410 +++++------
 src/cmd/compile/internal/ssa/rewriteMIPS.go | 740 +++++++++++---------
 2 files changed, 599 insertions(+), 551 deletions(-)

diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules
index 3091a84acc..964d244b98 100644
--- a/src/cmd/compile/internal/ssa/gen/MIPS.rules
+++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules
@@ -2,266 +2,266 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-(Add(Ptr|32|16|8) ...) -> (ADD ...)
-(Add(32|64)F ...) -> (ADD(F|D) ...)
+(Add(Ptr|32|16|8) ...) => (ADD ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)

-(Select0 (Add32carry x y)) -> (ADD x y)
-(Select1 (Add32carry x y)) -> (SGTU x (ADD x y))
-(Add32withcarry x y c) -> (ADD c (ADD x y))
+(Select0 (Add32carry x y)) => (ADD x y)
+(Select1 (Add32carry x y)) => (SGTU x (ADD x y))
+(Add32withcarry x y c) => (ADD c (ADD x y))

-(Sub(Ptr|32|16|8) ...) -> (SUB ...)
-(Sub(32|64)F ...) -> (SUB(F|D) ...)
+(Sub(Ptr|32|16|8) ...) => (SUB ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)

-(Select0 (Sub32carry x y)) -> (SUB x y)
-(Select1 (Sub32carry x y)) -> (SGTU (SUB x y) x)
-(Sub32withcarry x y c) -> (SUB (SUB x y) c)
+(Select0 (Sub32carry x y)) => (SUB x y)
+(Select1 (Sub32carry x y)) => (SGTU (SUB x y) x)
+(Sub32withcarry x y c) => (SUB (SUB x y) c)

-(Mul(32|16|8) ...) -> (MUL ...)
-(Mul(32|64)F ...) -> (MUL(F|D) ...)
+(Mul(32|16|8) ...) => (MUL ...)
+(Mul(32|64)F ...) => (MUL(F|D) ...)

-(Hmul(32|32u) x y) -> (Select0 (MUL(T|TU) x y))
-(Mul32uhilo ...) -> (MULTU ...)
+(Hmul(32|32u) x y) => (Select0 (MUL(T|TU) x y))
+(Mul32uhilo ...) => (MULTU ...)

-(Div32 x y) -> (Select1 (DIV x y))
-(Div32u x y) -> (Select1 (DIVU x y))
-(Div16 x y) -> (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
-(Div16u x y) -> (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Div8 x y) -> (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
-(Div8u x y) -> (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Div(32|64)F ...) -> (DIV(F|D) ...)
+(Div32 x y) => (Select1 (DIV x y))
+(Div32u x y) => (Select1 (DIVU x y))
+(Div16 x y) => (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+(Div16u x y) => (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Div8 x y) => (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+(Div8u x y) => (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Div(32|64)F ...) => (DIV(F|D) ...)
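A note on the rule syntax for readers following along: `->` rules treat AuxInt and Aux as untyped int64/interface{} values, while `=>` rules make the rule generator emit typed accessor calls, so a size or sign mismatch becomes a compile error in the generated rewriteMIPS.go. The sketch below shows plausible shapes for the helpers the generated code calls; the real ones live in cmd/compile/internal/ssa/rewrite.go, and the bodies here are illustrative, not copied from the source:

    package main

    import "fmt"

    // Typed-aux helper sketches: AuxInt is physically an int64, and these
    // wrappers make each conversion explicit at the rule's declared type.
    func int32ToAuxInt(i int32) int64 { return int64(i) }
    func auxIntToInt32(i int64) int32 { return int32(i) }

    // b2i translates a boolean to 0 or 1 for storage in an AuxInt, as used
    // by (ConstBool [b]) => (MOVWconst [int32(b2i(b))]) further down.
    func b2i(b bool) int64 {
        if b {
            return 1
        }
        return 0
    }

    func main() {
        fmt.Println(int32ToAuxInt(int32(b2i(true)))) // 1
    }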
-(Mod32 x y) -> (Select0 (DIV x y))
-(Mod32u x y) -> (Select0 (DIVU x y))
-(Mod16 x y) -> (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
-(Mod16u x y) -> (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Mod8 x y) -> (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
-(Mod8u x y) -> (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Mod32 x y) => (Select0 (DIV x y))
+(Mod32u x y) => (Select0 (DIVU x y))
+(Mod16 x y) => (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+(Mod16u x y) => (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Mod8 x y) => (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+(Mod8u x y) => (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))

-// (x + y) / 2 with x>=y -> (x - y) / 2 + y
-(Avg32u x y) -> (ADD (SRLconst (SUB x y) [1]) y)
+// (x + y) / 2 with x>=y becomes (x - y) / 2 + y
+(Avg32u x y) => (ADD (SRLconst (SUB x y) [1]) y)

-(And(32|16|8) ...) -> (AND ...)
-(Or(32|16|8) ...) -> (OR ...)
-(Xor(32|16|8) ...) -> (XOR ...)
+(And(32|16|8) ...) => (AND ...)
+(Or(32|16|8) ...) => (OR ...)
+(Xor(32|16|8) ...) => (XOR ...)

 // constant shifts
 // generic opt rewrites all constant shifts to shift by Const64
-(Lsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SLLconst x [c])
-(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SRAconst x [c])
-(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 -> (SRLconst x [c])
-(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SLLconst x [c])
-(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst x [16]) [c+16])
-(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst x [16]) [c+16])
-(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SLLconst x [c])
-(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst x [24]) [c+24])
-(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst x [24]) [c+24])
+(Lsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SLLconst x [int32(c)])
+(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SRAconst x [int32(c)])
+(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 => (SRLconst x [int32(c)])
+(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SLLconst x [int32(c)])
+(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SRAconst (SLLconst x [16]) [int32(c+16)])
+(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 => (SRLconst (SLLconst x [16]) [int32(c+16)])
+(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SLLconst x [int32(c)])
+(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SRAconst (SLLconst x [24]) [int32(c+24)])
+(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 => (SRLconst (SLLconst x [24]) [int32(c+24)])

 // large constant shifts
-(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
-(Rsh32Ux64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
-(Lsh16x64 _ (Const64 [c])) && uint32(c) >= 16 -> (MOVWconst [0])
-(Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 -> (MOVWconst [0])
-(Lsh8x64 _ (Const64 [c])) && uint32(c) >= 8 -> (MOVWconst [0])
-(Rsh8Ux64 _ (Const64 [c])) && uint32(c) >= 8 -> (MOVWconst [0])
+(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0])
+(Lsh16x64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0])
+(Lsh8x64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0])

 // large constant signed right shift, we leave the sign bit
-(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 -> (SRAconst x [31])
-(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst x [16]) [31])
-(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst x [24]) [31])
+(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 => (SRAconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 => (SRAconst (SLLconst x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 => (SRAconst (SLLconst x [24]) [31])
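These constant-shift rules bake Go's shift semantics in at compile time: shifting an N-bit value by N or more yields 0 for left and unsigned right shifts, and replicates the sign bit for signed right shifts. A small self-contained check of those semantics in plain Go (nothing compiler-specific is assumed):

    package main

    import "fmt"

    func main() {
        var x int16 = -2
        var u uint16 = 0x8000

        // Signed right shift by >= width keeps only the sign, matching
        // (Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 => (SRAconst (SLLconst x [16]) [31]).
        fmt.Println(int16(int32(x) << 16 >> 31)) // -1

        // Unsigned right shift by >= width is zero, matching
        // (Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0]).
        fmt.Println(u >> 16) // 0
    }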
 // shifts
 // hardware instruction uses only the low 5 bits of the shift
 // we compare to 32 to ensure Go semantics for large shifts
-(Lsh32x32 x y) -> (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y))
-(Lsh32x16 x y) -> (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-(Lsh32x8 x y) -> (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+(Lsh32x32 x y) => (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh32x16 x y) => (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh32x8 x y) => (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

-(Lsh16x32 x y) -> (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y))
-(Lsh16x16 x y) -> (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-(Lsh16x8 x y) -> (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+(Lsh16x32 x y) => (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh16x16 x y) => (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh16x8 x y) => (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

-(Lsh8x32 x y) -> (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y))
-(Lsh8x16 x y) -> (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-(Lsh8x8 x y) -> (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+(Lsh8x32 x y) => (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh8x16 x y) => (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh8x8 x y) => (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

-(Rsh32Ux32 x y) -> (CMOVZ (SRL x y) (MOVWconst [0]) (SGTUconst [32] y))
-(Rsh32Ux16 x y) -> (CMOVZ (SRL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-(Rsh32Ux8 x y) -> (CMOVZ (SRL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+(Rsh32Ux32 x y) => (CMOVZ (SRL x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh32Ux16 x y) => (CMOVZ (SRL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh32Ux8 x y) => (CMOVZ (SRL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

-(Rsh16Ux32 x y) -> (CMOVZ (SRL (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
-(Rsh16Ux16 x y) -> (CMOVZ (SRL (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-(Rsh16Ux8 x y) -> (CMOVZ (SRL (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+(Rsh16Ux32 x y) => (CMOVZ (SRL (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh16Ux16 x y) => (CMOVZ (SRL (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh16Ux8 x y) => (CMOVZ (SRL (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

-(Rsh8Ux32 x y) -> (CMOVZ (SRL (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
-(Rsh8Ux16 x y) -> (CMOVZ (SRL (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-(Rsh8Ux8 x y) -> (CMOVZ (SRL (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+(Rsh8Ux32 x y) => (CMOVZ (SRL (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh8Ux16 x y) => (CMOVZ (SRL (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh8Ux8 x y) => (CMOVZ (SRL (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
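The CMOVZ/SGTUconst pattern in the variable-shift rules above exists because the MIPS shift instructions look only at the low 5 bits of the count, while Go requires an out-of-range shift to produce 0. In plain Go the lowered logic is roughly this (a sketch, not compiler code):

    package main

    import "fmt"

    // lsh32 mirrors (Lsh32x32 x y) => (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y)):
    // compute the hardware shift, then select 0 when the guard 32 > y fails.
    func lsh32(x, y uint32) uint32 {
        shifted := x << (y & 31) // MIPS SLL uses only the low 5 bits of y
        if 32 > y {              // SGTUconst [32] y
            return shifted // CMOVZ keeps the shift result
        }
        return 0 // out-of-range shift: Go semantics demand 0
    }

    func main() {
        fmt.Println(lsh32(1, 4))  // 16
        fmt.Println(lsh32(1, 33)) // 0, though the raw hardware shift would give 1<<1
    }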
-(Rsh32x32 x y) -> (SRA x ( CMOVZ y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh32x16 x y) -> (SRA x ( CMOVZ (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh32x8 x y) -> (SRA x ( CMOVZ (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh32x32 x y) => (SRA x ( CMOVZ y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh32x16 x y) => (SRA x ( CMOVZ (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh32x8 x y) => (SRA x ( CMOVZ (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))

-(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh16x32 x y) => (SRA (SignExt16to32 x) ( CMOVZ y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh16x16 x y) => (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh16x8 x y) => (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))

-(Rsh8x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh8x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh8x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh8x32 x y) => (SRA (SignExt16to32 x) ( CMOVZ y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh8x16 x y) => (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh8x8 x y) => (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))

 // rotates
-(RotateLeft8 x (MOVWconst [c])) -> (Or8 (Lsh8x32 x (MOVWconst [c&7])) (Rsh8Ux32 x (MOVWconst [-c&7])))
-(RotateLeft16 x (MOVWconst [c])) -> (Or16 (Lsh16x32 x (MOVWconst [c&15])) (Rsh16Ux32 x (MOVWconst [-c&15])))
-(RotateLeft32 x (MOVWconst [c])) -> (Or32 (Lsh32x32 x (MOVWconst [c&31])) (Rsh32Ux32 x (MOVWconst [-c&31])))
-(RotateLeft64 x (MOVWconst [c])) -> (Or64 (Lsh64x32 x (MOVWconst [c&63])) (Rsh64Ux32 x (MOVWconst [-c&63])))
+(RotateLeft8 x (MOVWconst [c])) => (Or8 (Lsh8x32 x (MOVWconst [c&7])) (Rsh8Ux32 x (MOVWconst [-c&7])))
+(RotateLeft16 x (MOVWconst [c])) => (Or16 (Lsh16x32 x (MOVWconst [c&15])) (Rsh16Ux32 x (MOVWconst [-c&15])))
+(RotateLeft32 x (MOVWconst [c])) => (Or32 (Lsh32x32 x (MOVWconst [c&31])) (Rsh32Ux32 x (MOVWconst [-c&31])))
+(RotateLeft64 x (MOVWconst [c])) => (Or64 (Lsh64x32 x (MOVWconst [c&63])) (Rsh64Ux32 x (MOVWconst [-c&63])))

 // unary ops
-(Neg(32|16|8) ...) -> (NEG ...)
-(Neg(32|64)F ...) -> (NEG(F|D) ...)
+(Neg(32|16|8) ...) => (NEG ...)
+(Neg(32|64)F ...) => (NEG(F|D) ...)

-(Com(32|16|8) x) -> (NORconst [0] x)
+(Com(32|16|8) x) => (NORconst [0] x)

-(Sqrt ...) -> (SQRTD ...)
+(Sqrt ...) => (SQRTD ...)
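The rotate rules use the standard decomposition of a rotate-by-constant into an OR of two opposite shifts. The same identity can be checked in plain Go against math/bits (a sketch for intuition, not compiler code):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // rotl32 mirrors (RotateLeft32 x (MOVWconst [c])) =>
    // (Or32 (Lsh32x32 x (MOVWconst [c&31])) (Rsh32Ux32 x (MOVWconst [-c&31]))).
    func rotl32(x uint32, c int32) uint32 {
        return x<<(uint32(c)&31) | x>>(uint32(-c)&31)
    }

    func main() {
        x := uint32(0x80000001)
        fmt.Println(rotl32(x, 1) == bits.RotateLeft32(x, 1))   // true
        fmt.Println(rotl32(x, 33) == bits.RotateLeft32(x, 33)) // true
    }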
 // TODO: optimize this case?
-(Ctz32NonZero ...) -> (Ctz32 ...)
+(Ctz32NonZero ...) => (Ctz32 ...)

 // count trailing zero
 // 32 - CLZ(x&-x - 1)
-(Ctz32 x) -> (SUB (MOVWconst [32]) (CLZ (SUBconst [1] (AND x (NEG x)))))
+(Ctz32 x) => (SUB (MOVWconst [32]) (CLZ (SUBconst [1] (AND x (NEG x)))))

 // bit length
-(BitLen32 x) -> (SUB (MOVWconst [32]) (CLZ x))
+(BitLen32 x) => (SUB (MOVWconst [32]) (CLZ x))

 // boolean ops -- booleans are represented with 0=false, 1=true
-(AndB ...) -> (AND ...)
-(OrB ...) -> (OR ...)
-(EqB x y) -> (XORconst [1] (XOR x y))
-(NeqB ...) -> (XOR ...)
-(Not x) -> (XORconst [1] x)
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XORconst [1] (XOR x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)

 // constants
-(Const(32|16|8) ...) -> (MOVWconst ...)
-(Const(32|64)F ...) -> (MOV(F|D)const ...)
-(ConstNil) -> (MOVWconst [0])
-(ConstBool ...) -> (MOVWconst ...)
+(Const(32|16|8) [val]) => (MOVWconst [int32(val)])
+(Const(32|64)F ...) => (MOV(F|D)const ...)
+(ConstNil) => (MOVWconst [0])
+(ConstBool [b]) => (MOVWconst [int32(b2i(b))])

 // truncations
 // Because we ignore high parts of registers, truncates are just copies.
-(Trunc16to8 ...) -> (Copy ...)
-(Trunc32to8 ...) -> (Copy ...)
-(Trunc32to16 ...) -> (Copy ...)
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)

 // Zero-/Sign-extensions
-(ZeroExt8to16 ...) -> (MOVBUreg ...)
-(ZeroExt8to32 ...) -> (MOVBUreg ...)
-(ZeroExt16to32 ...) -> (MOVHUreg ...)
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)

-(SignExt8to16 ...) -> (MOVBreg ...)
-(SignExt8to32 ...) -> (MOVBreg ...)
-(SignExt16to32 ...) -> (MOVHreg ...)
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)

-(Signmask x) -> (SRAconst x [31])
-(Zeromask x) -> (NEG (SGTU x (MOVWconst [0])))
-(Slicemask x) -> (SRAconst (NEG x) [31])
+(Signmask x) => (SRAconst x [31])
+(Zeromask x) => (NEG (SGTU x (MOVWconst [0])))
+(Slicemask x) => (SRAconst (NEG x) [31])

-// float <-> int conversion
-(Cvt32to(32|64)F ...) -> (MOVW(F|D) ...)
-(Cvt(32|64)Fto32 ...) -> (TRUNC(F|D)W ...)
-(Cvt32Fto64F ...) -> (MOVFD ...)
-(Cvt64Fto32F ...) -> (MOVDF ...)
+// float-int conversion
+(Cvt32to(32|64)F ...) => (MOVW(F|D) ...)
+(Cvt(32|64)Fto32 ...) => (TRUNC(F|D)W ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)

-(CvtBoolToUint8 ...) -> (Copy ...)
+(CvtBoolToUint8 ...) => (Copy ...)

-(Round(32|64)F ...) -> (Copy ...)
+(Round(32|64)F ...) => (Copy ...)
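The "32 - CLZ(x&-x - 1)" comment above is the classic count-trailing-zeros trick: x&-x isolates the lowest set bit, subtracting 1 turns it into a mask of exactly the trailing zeros, and 32 minus the leading-zero count of that mask counts them. A quick plain-Go verification against math/bits:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // ctz32 mirrors (Ctz32 x) => (SUB (MOVWconst [32]) (CLZ (SUBconst [1] (AND x (NEG x))))).
    func ctz32(x uint32) int {
        low := x & -x                          // isolate lowest set bit (0 if x == 0)
        return 32 - bits.LeadingZeros32(low-1) // 32 - CLZ(x&-x - 1)
    }

    func main() {
        for _, x := range []uint32{0, 1, 8, 0x80000000} {
            fmt.Println(ctz32(x) == bits.TrailingZeros32(x)) // true for each
        }
    }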
 // comparisons
-(Eq8 x y) -> (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Eq16 x y) -> (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Eq32 x y) -> (SGTUconst [1] (XOR x y))
-(EqPtr x y) -> (SGTUconst [1] (XOR x y))
-(Eq(32|64)F x y) -> (FPFlagTrue (CMPEQ(F|D) x y))
-
-(Neq8 x y) -> (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
-(Neq16 x y) -> (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
-(Neq32 x y) -> (SGTU (XOR x y) (MOVWconst [0]))
-(NeqPtr x y) -> (SGTU (XOR x y) (MOVWconst [0]))
-(Neq(32|64)F x y) -> (FPFlagFalse (CMPEQ(F|D) x y))
-
-(Less8 x y) -> (SGT (SignExt8to32 y) (SignExt8to32 x))
-(Less16 x y) -> (SGT (SignExt16to32 y) (SignExt16to32 x))
-(Less32 x y) -> (SGT y x)
-(Less(32|64)F x y) -> (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
-
-(Less8U x y) -> (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
-(Less16U x y) -> (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
-(Less32U x y) -> (SGTU y x)
-
-(Leq8 x y) -> (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
-(Leq16 x y) -> (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
-(Leq32 x y) -> (XORconst [1] (SGT x y))
-(Leq(32|64)F x y) -> (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
-
-(Leq8U x y) -> (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Leq16U x y) -> (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Leq32U x y) -> (XORconst [1] (SGTU x y))
-
-(OffPtr [off] ptr:(SP)) -> (MOVWaddr [off] ptr)
-(OffPtr [off] ptr) -> (ADDconst [off] ptr)
-
-(Addr ...) -> (MOVWaddr ...)
-(LocalAddr {sym} base _) -> (MOVWaddr {sym} base)
+(Eq8 x y) => (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (SGTUconst [1] (XOR x y))
+(EqPtr x y) => (SGTUconst [1] (XOR x y))
+(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))
+
+(Neq8 x y) => (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
+(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
+(Neq32 x y) => (SGTU (XOR x y) (MOVWconst [0]))
+(NeqPtr x y) => (SGTU (XOR x y) (MOVWconst [0]))
+(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))
+
+(Less8 x y) => (SGT (SignExt8to32 y) (SignExt8to32 x))
+(Less16 x y) => (SGT (SignExt16to32 y) (SignExt16to32 x))
+(Less32 x y) => (SGT y x)
+(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
+(Less16U x y) => (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
+(Less32U x y) => (SGTU y x)
+
+(Leq8 x y) => (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (XORconst [1] (SGT x y))
+(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (XORconst [1] (SGTU x y))
+
+(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr)
+
+(Addr {sym} base) => (MOVWaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVWaddr {sym} base)
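MIPS only has set-on-greater-than instructions (SGT/SGTU), so the rules above synthesize the other comparisons from them: Less swaps the operands, and Leq is the complement of SGT, computed by XORing the 0/1 boolean with 1. A plain-Go sketch of that lowering:

    package main

    import "fmt"

    // sgt mirrors MIPS SGT: 1 if x > y (signed), else 0.
    func sgt(x, y int32) int32 {
        if x > y {
            return 1
        }
        return 0
    }

    // leq32 mirrors (Leq32 x y) => (XORconst [1] (SGT x y)):
    // x <= y is the complement of x > y, and booleans are 0/1.
    func leq32(x, y int32) int32 { return 1 ^ sgt(x, y) }

    // less32 mirrors (Less32 x y) => (SGT y x): x < y is y > x.
    func less32(x, y int32) int32 { return sgt(y, x) }

    func main() {
        fmt.Println(leq32(3, 3), less32(3, 3)) // 1 0
    }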
 // loads
-(Load ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
-(Load ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
-(Load ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
-(Load ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
-(Load ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
-(Load ptr mem) && (is32BitInt(t) || isPtr(t)) -> (MOVWload ptr mem)
-(Load ptr mem) && is32BitFloat(t) -> (MOVFload ptr mem)
-(Load ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)
+(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem)
+(Load ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)

 // stores
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem)

 // zero instructions
-(Zero [0] _ mem) -> mem
-(Zero [1] ptr mem) -> (MOVBstore ptr (MOVWconst [0]) mem)
-(Zero [2] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVWconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
 	(MOVHstore ptr (MOVWconst [0]) mem)
-(Zero [2] ptr mem) ->
+(Zero [2] ptr mem) =>
 	(MOVBstore [1] ptr (MOVWconst [0])
 		(MOVBstore [0] ptr (MOVWconst [0]) mem))
-(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
 	(MOVWstore ptr (MOVWconst [0]) mem)
-(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
 	(MOVHstore [2] ptr (MOVWconst [0])
 		(MOVHstore [0] ptr (MOVWconst [0]) mem))
-(Zero [4] ptr mem) ->
+(Zero [4] ptr mem) =>
 	(MOVBstore [3] ptr (MOVWconst [0])
 		(MOVBstore [2] ptr (MOVWconst [0])
 			(MOVBstore [1] ptr (MOVWconst [0])
 				(MOVBstore [0] ptr (MOVWconst [0]) mem))))
-(Zero [3] ptr mem) ->
+(Zero [3] ptr mem) =>
 	(MOVBstore [2] ptr (MOVWconst [0])
 		(MOVBstore [1] ptr (MOVWconst [0])
 			(MOVBstore [0] ptr (MOVWconst [0]) mem)))
-(Zero [6] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
 	(MOVHstore [4] ptr (MOVWconst [0])
 		(MOVHstore [2] ptr (MOVWconst [0])
 			(MOVHstore [0] ptr (MOVWconst [0]) mem)))
-(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
 	(MOVWstore [4] ptr (MOVWconst [0])
 		(MOVWstore [0] ptr (MOVWconst [0]) mem))
-(Zero [12] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
 	(MOVWstore [8] ptr (MOVWconst [0])
 		(MOVWstore [4] ptr (MOVWconst [0])
 			(MOVWstore [0] ptr (MOVWconst [0]) mem)))
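Small Zero (and Move) ops are unrolled into the widest stores the type's alignment permits, which is what the Alignment()%4 and %2 guards select. The same decision in plain Go, with a hypothetical helper name used purely for illustration:

    package main

    import "fmt"

    // storeWidth picks the widest store unit a Zero/Move of the given
    // alignment may use, mirroring the t.Alignment()%4 / %2 guards above.
    func storeWidth(align int64) int64 {
        switch {
        case align%4 == 0:
            return 4 // MOVWstore
        case align%2 == 0:
            return 2 // MOVHstore
        default:
            return 1 // MOVBstore
        }
    }

    func main() {
        fmt.Println(storeWidth(8), storeWidth(2), storeWidth(1)) // 4 2 1
    }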
-(Zero [16] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Zero [16] {t} ptr mem) && t.Alignment()%4 == 0 =>
 	(MOVWstore [12] ptr (MOVWconst [0])
 		(MOVWstore [8] ptr (MOVWconst [0])
 			(MOVWstore [4] ptr (MOVWconst [0])
@@ -269,51 +269,51 @@
 // large or unaligned zeroing uses a loop
 (Zero [s] {t} ptr mem)
-	&& (s > 16 || t.(*types.Type).Alignment()%4 != 0) ->
-	(LoweredZero [t.(*types.Type).Alignment()]
+	&& (s > 16 || t.Alignment()%4 != 0) =>
+	(LoweredZero [int32(t.Alignment())]
 		ptr
-		(ADDconst ptr [s-moveSize(t.(*types.Type).Alignment(), config)])
+		(ADDconst ptr [int32(s-moveSize(t.Alignment(), config))])
 		mem)

 // moves
-(Move [0] _ _ mem) -> mem
-(Move [1] dst src mem) -> (MOVBstore dst (MOVBUload src mem) mem)
-(Move [2] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
 	(MOVHstore dst (MOVHUload src mem) mem)
-(Move [2] dst src mem) ->
+(Move [2] dst src mem) =>
 	(MOVBstore [1] dst (MOVBUload [1] src mem)
 		(MOVBstore dst (MOVBUload src mem) mem))
-(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
 	(MOVWstore dst (MOVWload src mem) mem)
-(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
 	(MOVHstore [2] dst (MOVHUload [2] src mem)
 		(MOVHstore dst (MOVHUload src mem) mem))
-(Move [4] dst src mem) ->
+(Move [4] dst src mem) =>
 	(MOVBstore [3] dst (MOVBUload [3] src mem)
 		(MOVBstore [2] dst (MOVBUload [2] src mem)
 			(MOVBstore [1] dst (MOVBUload [1] src mem)
 				(MOVBstore dst (MOVBUload src mem) mem))))
-(Move [3] dst src mem) ->
+(Move [3] dst src mem) =>
 	(MOVBstore [2] dst (MOVBUload [2] src mem)
 		(MOVBstore [1] dst (MOVBUload [1] src mem)
 			(MOVBstore dst (MOVBUload src mem) mem)))
-(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
 	(MOVWstore [4] dst (MOVWload [4] src mem)
 		(MOVWstore dst (MOVWload src mem) mem))
-(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
 	(MOVHstore [6] dst (MOVHload [6] src mem)
 		(MOVHstore [4] dst (MOVHload [4] src mem)
 			(MOVHstore [2] dst (MOVHload [2] src mem)
 				(MOVHstore dst (MOVHload src mem) mem))))
-(Move [6] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
 	(MOVHstore [4] dst (MOVHload [4] src mem)
 		(MOVHstore [2] dst (MOVHload [2] src mem)
 			(MOVHstore dst (MOVHload src mem) mem)))
-(Move [12] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
 	(MOVWstore [8] dst (MOVWload [8] src mem)
 		(MOVWstore [4] dst (MOVWload [4] src mem)
 			(MOVWstore dst (MOVWload src mem) mem)))
-(Move [16] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
+(Move [16] {t} dst src mem) && t.Alignment()%4 == 0 =>
 	(MOVWstore [12] dst (MOVWload [12] src mem)
 		(MOVWstore [8] dst (MOVWload [8] src mem)
 			(MOVWstore [4] dst (MOVWload [4] src mem)
@@ -322,29 +322,29 @@
 // large or unaligned move uses a loop
 (Move [s] {t} dst src mem)
-	&& (s > 16 && logLargeCopy(v, s) || t.(*types.Type).Alignment()%4 != 0) ->
-	(LoweredMove [t.(*types.Type).Alignment()]
+	&& (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) =>
+	(LoweredMove [int32(t.Alignment())]
 		dst
 		src
-		(ADDconst src [s-moveSize(t.(*types.Type).Alignment(), config)])
+		(ADDconst src [int32(s-moveSize(t.Alignment(), config))])
 		mem)
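For sizes past the unrolling threshold, LoweredMove emits a copy loop whose end pointer is precomputed as src plus the size rounded by moveSize. A rough plain-Go picture of the word-aligned case (a sketch of the runtime behavior, not the emitted assembly):

    package main

    import "fmt"

    // loweredMove sketches what the LoweredMove loop does for a 4-byte-aligned
    // copy: one load/store pair per word until the end pointer is reached.
    func loweredMove(dst, src []byte) {
        for i := 0; i+4 <= len(src); i += 4 {
            copy(dst[i:i+4], src[i:i+4]) // one MOVWload/MOVWstore pair per iteration
        }
    }

    func main() {
        src := []byte{1, 2, 3, 4, 5, 6, 7, 8}
        dst := make([]byte, 8)
        loweredMove(dst, src)
        fmt.Println(dst) // [1 2 3 4 5 6 7 8]
    }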
 // calls
-(StaticCall ...) -> (CALLstatic ...)
-(ClosureCall ...) -> (CALLclosure ...)
-(InterCall ...) -> (CALLinter ...)
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)

 // atomic intrinsics
-(AtomicLoad(8|32) ...) -> (LoweredAtomicLoad(8|32) ...)
-(AtomicLoadPtr ...) -> (LoweredAtomicLoad32 ...)
+(AtomicLoad(8|32) ...) => (LoweredAtomicLoad(8|32) ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad32 ...)

-(AtomicStore(8|32) ...) -> (LoweredAtomicStore(8|32) ...)
-(AtomicStorePtrNoWB ...) -> (LoweredAtomicStore32 ...)
+(AtomicStore(8|32) ...) => (LoweredAtomicStore(8|32) ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore32 ...)

-(AtomicExchange32 ...) -> (LoweredAtomicExchange ...)
-(AtomicAdd32 ...) -> (LoweredAtomicAdd ...)
+(AtomicExchange32 ...) => (LoweredAtomicExchange ...)
+(AtomicAdd32 ...) => (LoweredAtomicAdd ...)

-(AtomicCompareAndSwap32 ...) -> (LoweredAtomicCas ...)
+(AtomicCompareAndSwap32 ...) => (LoweredAtomicCas ...)

 // AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
 (AtomicOr8 ptr val mem) && !config.BigEndian ->
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go
index b2fc669310..e7adbad045 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go
@@ -28,8 +28,7 @@ func rewriteValueMIPS(v *Value) bool {
 		v.Op = OpMIPSADD
 		return true
 	case OpAddr:
-		v.Op = OpMIPSMOVWaddr
-		return true
+		return rewriteValueMIPS_OpAddr(v)
 	case OpAnd16:
 		v.Op = OpMIPSAND
 		return true
@@ -87,11 +86,9 @@ func rewriteValueMIPS(v *Value) bool {
 	case OpCom8:
 		return rewriteValueMIPS_OpCom8(v)
 	case OpConst16:
-		v.Op = OpMIPSMOVWconst
-		return true
+		return rewriteValueMIPS_OpConst16(v)
 	case OpConst32:
-		v.Op = OpMIPSMOVWconst
-		return true
+		return rewriteValueMIPS_OpConst32(v)
 	case OpConst32F:
 		v.Op = OpMIPSMOVFconst
 		return true
 	case OpConst64F:
 		v.Op = OpMIPSMOVDconst
 		return true
 	case OpConst8:
-		v.Op = OpMIPSMOVWconst
-		return true
+		return rewriteValueMIPS_OpConst8(v)
 	case OpConstBool:
-		v.Op = OpMIPSMOVWconst
-		return true
+		return rewriteValueMIPS_OpConstBool(v)
 	case OpConstNil:
 		return rewriteValueMIPS_OpConstNil(v)
 	case OpCtz32:
@@ -594,6 +589,19 @@ func rewriteValueMIPS_OpAdd32withcarry(v *Value) bool {
 		return true
 	}
 }
+func rewriteValueMIPS_OpAddr(v *Value) bool {
+	v_0 := v.Args[0]
+	// match: (Addr {sym} base)
+	// result: (MOVWaddr {sym} base)
+	for {
+		sym := auxToSym(v.Aux)
+		base := v_0
+		v.reset(OpMIPSMOVWaddr)
+		v.Aux = symToAux(sym)
+		v.AddArg(base)
+		return true
+	}
+}
 func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
 	v_2 := v.Args[2]
 	v_1 := v.Args[1]
@@ -775,7 +783,7 @@ func rewriteValueMIPS_OpAvg32u(v *Value) bool {
 		y := v_1
 		v.reset(OpMIPSADD)
 		v0 := b.NewValue0(v.Pos, OpMIPSSRLconst, t)
-		v0.AuxInt = 1
+		v0.AuxInt = int32ToAuxInt(1)
 		v1 := b.NewValue0(v.Pos, OpMIPSSUB, t)
 		v1.AddArg2(x, y)
 		v0.AddArg(v1)
@@ -794,7 +802,7 @@ func rewriteValueMIPS_OpBitLen32(v *Value) bool {
 		x := v_0
 		v.reset(OpMIPSSUB)
 		v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v0.AuxInt = 32
+		v0.AuxInt = int32ToAuxInt(32)
 		v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
 		v1.AddArg(x)
 		v.AddArg2(v0, v1)
@@ -808,7 +816,7 @@ func rewriteValueMIPS_OpCom16(v *Value) bool {
 	for {
 		x := v_0
 		v.reset(OpMIPSNORconst)
-		v.AuxInt = 0
+		v.AuxInt = int32ToAuxInt(0)
 		v.AddArg(x)
 		return true
 	}
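The visible payoff of `=>` shows up in the generated code above: ops that previously just had their opcode swapped now get dedicated rewrite functions that route every aux field through a typed accessor. One concrete reason this matters is width: a Const16's AuxInt is logically an int16, and a MOVWconst wants an int32, so the typed form spells out the conversion that the untyped rules performed silently. A quick plain-Go illustration:

    package main

    import "fmt"

    // Why (Const16 [val]) => (MOVWconst [int32(val)]) writes int32(val):
    // converting through int16 first preserves the sign when the value is
    // widened into the int64 that physically backs AuxInt.
    func main() {
        val := int16(-1)
        aux := int64(int32(val)) // sign-extends: still -1, not 0xFFFF
        fmt.Println(aux)         // -1
    }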
@@ -820,7 +828,7 @@ func rewriteValueMIPS_OpCom32(v *Value) bool {
 	for {
 		x := v_0
 		v.reset(OpMIPSNORconst)
-		v.AuxInt = 0
+		v.AuxInt = int32ToAuxInt(0)
 		v.AddArg(x)
 		return true
 	}
@@ -832,17 +840,57 @@ func rewriteValueMIPS_OpCom8(v *Value) bool {
 	for {
 		x := v_0
 		v.reset(OpMIPSNORconst)
-		v.AuxInt = 0
+		v.AuxInt = int32ToAuxInt(0)
 		v.AddArg(x)
 		return true
 	}
 }
+func rewriteValueMIPS_OpConst16(v *Value) bool {
+	// match: (Const16 [val])
+	// result: (MOVWconst [int32(val)])
+	for {
+		val := auxIntToInt16(v.AuxInt)
+		v.reset(OpMIPSMOVWconst)
+		v.AuxInt = int32ToAuxInt(int32(val))
+		return true
+	}
+}
+func rewriteValueMIPS_OpConst32(v *Value) bool {
+	// match: (Const32 [val])
+	// result: (MOVWconst [int32(val)])
+	for {
+		val := auxIntToInt32(v.AuxInt)
+		v.reset(OpMIPSMOVWconst)
+		v.AuxInt = int32ToAuxInt(int32(val))
+		return true
+	}
+}
+func rewriteValueMIPS_OpConst8(v *Value) bool {
+	// match: (Const8 [val])
+	// result: (MOVWconst [int32(val)])
+	for {
+		val := auxIntToInt8(v.AuxInt)
+		v.reset(OpMIPSMOVWconst)
+		v.AuxInt = int32ToAuxInt(int32(val))
+		return true
+	}
+}
+func rewriteValueMIPS_OpConstBool(v *Value) bool {
+	// match: (ConstBool [b])
+	// result: (MOVWconst [int32(b2i(b))])
+	for {
+		b := auxIntToBool(v.AuxInt)
+		v.reset(OpMIPSMOVWconst)
+		v.AuxInt = int32ToAuxInt(int32(b2i(b)))
+		return true
+	}
+}
 func rewriteValueMIPS_OpConstNil(v *Value) bool {
 	// match: (ConstNil)
 	// result: (MOVWconst [0])
 	for {
 		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
+		v.AuxInt = int32ToAuxInt(0)
 		return true
 	}
 }
@@ -857,10 +905,10 @@ func rewriteValueMIPS_OpCtz32(v *Value) bool {
 		x := v_0
 		v.reset(OpMIPSSUB)
 		v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v0.AuxInt = 32
+		v0.AuxInt = int32ToAuxInt(32)
 		v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
 		v2 := b.NewValue0(v.Pos, OpMIPSSUBconst, t)
-		v2.AuxInt = 1
+		v2.AuxInt = int32ToAuxInt(1)
 		v3 := b.NewValue0(v.Pos, OpMIPSAND, t)
 		v4 := b.NewValue0(v.Pos, OpMIPSNEG, t)
 		v4.AddArg(x)
@@ -1000,7 +1048,7 @@ func rewriteValueMIPS_OpEq16(v *Value) bool {
 		x := v_0
 		y := v_1
 		v.reset(OpMIPSSGTUconst)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
 		v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
 		v1.AddArg(x)
@@ -1022,7 +1070,7 @@ func rewriteValueMIPS_OpEq32(v *Value) bool {
 		x := v_0
 		y := v_1
 		v.reset(OpMIPSSGTUconst)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
@@ -1072,7 +1120,7 @@ func rewriteValueMIPS_OpEq8(v *Value) bool {
 		x := v_0
 		y := v_1
 		v.reset(OpMIPSSGTUconst)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
 		v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
 		v1.AddArg(x)
@@ -1094,7 +1142,7 @@ func rewriteValueMIPS_OpEqB(v *Value) bool {
 		x := v_0
 		y := v_1
 		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.Bool)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
@@ -1112,7 +1160,7 @@ func rewriteValueMIPS_OpEqPtr(v *Value) bool {
 		x := v_0
 		y := v_1
 		v.reset(OpMIPSSGTUconst)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
@@ -1210,7 +1258,7 @@ func rewriteValueMIPS_OpLeq16(v *Value) bool {
 		x := v_0
 		y := v_1
 		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
 		v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
 		v1.AddArg(x)
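The Eq rewrites above all share one trick: equality has no dedicated MIPS instruction, so x == y is computed as an unsigned "1 greater than x XOR y", which holds exactly when the XOR is zero. In plain Go:

    package main

    import "fmt"

    // eq32 mirrors (Eq32 x y) => (SGTUconst [1] (XOR x y)):
    // 1 > (x^y) as an unsigned compare is true only when x^y == 0.
    func eq32(x, y uint32) uint32 {
        if 1 > x^y { // SGTUconst [1] ...
            return 1
        }
        return 0
    }

    func main() {
        fmt.Println(eq32(7, 7), eq32(7, 8)) // 1 0
    }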
@@ -1232,7 +1280,7 @@ func rewriteValueMIPS_OpLeq16U(v *Value) bool {
 		x := v_0
 		y := v_1
 		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
 		v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
 		v1.AddArg(x)
@@ -1254,7 +1302,7 @@ func rewriteValueMIPS_OpLeq32(v *Value) bool {
 		x := v_0
 		y := v_1
 		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
@@ -1288,7 +1336,7 @@ func rewriteValueMIPS_OpLeq32U(v *Value) bool {
 		x := v_0
 		y := v_1
 		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
 		v0.AddArg2(x, y)
 		v.AddArg(v0)
@@ -1322,7 +1370,7 @@ func rewriteValueMIPS_OpLeq8(v *Value) bool {
 		x := v_0
 		y := v_1
 		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
 		v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
 		v1.AddArg(x)
@@ -1344,7 +1392,7 @@ func rewriteValueMIPS_OpLeq8U(v *Value) bool {
 		x := v_0
 		y := v_1
 		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
 		v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
 		v1.AddArg(x)
@@ -1611,10 +1659,10 @@ func rewriteValueMIPS_OpLocalAddr(v *Value) bool {
 	// match: (LocalAddr {sym} base _)
 	// result: (MOVWaddr {sym} base)
 	for {
-		sym := v.Aux
+		sym := auxToSym(v.Aux)
 		base := v_0
 		v.reset(OpMIPSMOVWaddr)
-		v.Aux = sym
+		v.Aux = symToAux(sym)
 		v.AddArg(base)
 		return true
 	}
@@ -1636,9 +1684,9 @@ func rewriteValueMIPS_OpLsh16x16(v *Value) bool {
 		v1.AddArg(y)
 		v0.AddArg2(x, v1)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = 0
+		v2.AuxInt = int32ToAuxInt(0)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
 		v4.AddArg(y)
 		v3.AddArg(v4)
@@ -1661,9 +1709,9 @@ func rewriteValueMIPS_OpLsh16x32(v *Value) bool {
 		v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
 		v0.AddArg2(x, y)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v1.AuxInt = 0
+		v1.AuxInt = int32ToAuxInt(0)
 		v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v2.AuxInt = 32
+		v2.AuxInt = int32ToAuxInt(32)
 		v2.AddArg(y)
 		v.AddArg3(v0, v1, v2)
 		return true
@@ -1674,18 +1722,18 @@ func rewriteValueMIPS_OpLsh16x64(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Lsh16x64 x (Const64 [c]))
 	// cond: uint32(c) < 16
-	// result: (SLLconst x [c])
+	// result: (SLLconst x [int32(c)])
 	for {
 		x := v_0
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) < 16) {
 			break
 		}
 		v.reset(OpMIPSSLLconst)
-		v.AuxInt = c
+		v.AuxInt = int32ToAuxInt(int32(c))
 		v.AddArg(x)
 		return true
 	}
@@ -1696,12 +1744,12 @@ func rewriteValueMIPS_OpLsh16x64(v *Value) bool {
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) >= 16) {
 			break
 		}
 		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
+		v.AuxInt = int32ToAuxInt(0)
 		return true
 	}
 	return false
@@ -1723,9 +1771,9 @@ func rewriteValueMIPS_OpLsh16x8(v *Value) bool {
 		v1.AddArg(y)
 		v0.AddArg2(x, v1)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = 0
+		v2.AuxInt = int32ToAuxInt(0)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
 		v4.AddArg(y)
 		v3.AddArg(v4)
@@ -1750,9 +1798,9 @@ func rewriteValueMIPS_OpLsh32x16(v *Value) bool {
 		v1.AddArg(y)
 		v0.AddArg2(x, v1)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = 0
+		v2.AuxInt = int32ToAuxInt(0)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
 		v4.AddArg(y)
 		v3.AddArg(v4)
@@ -1775,9 +1823,9 @@ func rewriteValueMIPS_OpLsh32x32(v *Value) bool {
 		v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
 		v0.AddArg2(x, y)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v1.AuxInt = 0
+		v1.AuxInt = int32ToAuxInt(0)
 		v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v2.AuxInt = 32
+		v2.AuxInt = int32ToAuxInt(32)
 		v2.AddArg(y)
 		v.AddArg3(v0, v1, v2)
 		return true
@@ -1788,18 +1836,18 @@ func rewriteValueMIPS_OpLsh32x64(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Lsh32x64 x (Const64 [c]))
 	// cond: uint32(c) < 32
-	// result: (SLLconst x [c])
+	// result: (SLLconst x [int32(c)])
 	for {
 		x := v_0
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) < 32) {
 			break
 		}
 		v.reset(OpMIPSSLLconst)
-		v.AuxInt = c
+		v.AuxInt = int32ToAuxInt(int32(c))
 		v.AddArg(x)
 		return true
 	}
@@ -1810,12 +1858,12 @@ func rewriteValueMIPS_OpLsh32x64(v *Value) bool {
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) >= 32) {
 			break
 		}
 		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
+		v.AuxInt = int32ToAuxInt(0)
 		return true
 	}
 	return false
@@ -1837,9 +1885,9 @@ func rewriteValueMIPS_OpLsh32x8(v *Value) bool {
 		v1.AddArg(y)
 		v0.AddArg2(x, v1)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = 0
+		v2.AuxInt = int32ToAuxInt(0)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
 		v4.AddArg(y)
 		v3.AddArg(v4)
@@ -1864,9 +1912,9 @@ func rewriteValueMIPS_OpLsh8x16(v *Value) bool {
 		v1.AddArg(y)
 		v0.AddArg2(x, v1)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = 0
+		v2.AuxInt = int32ToAuxInt(0)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
 		v4.AddArg(y)
 		v3.AddArg(v4)
@@ -1889,9 +1937,9 @@ func rewriteValueMIPS_OpLsh8x32(v *Value) bool {
 		v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
 		v0.AddArg2(x, y)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v1.AuxInt = 0
+		v1.AuxInt = int32ToAuxInt(0)
 		v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v2.AuxInt = 32
+		v2.AuxInt = int32ToAuxInt(32)
 		v2.AddArg(y)
 		v.AddArg3(v0, v1, v2)
 		return true
@@ -1902,18 +1950,18 @@ func rewriteValueMIPS_OpLsh8x64(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Lsh8x64 x (Const64 [c]))
 	// cond: uint32(c) < 8
-	// result: (SLLconst x [c])
+	// result: (SLLconst x [int32(c)])
 	for {
 		x := v_0
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) < 8) {
 			break
 		}
 		v.reset(OpMIPSSLLconst)
-		v.AuxInt = c
+		v.AuxInt = int32ToAuxInt(int32(c))
 		v.AddArg(x)
 		return true
 	}
@@ -1924,12 +1972,12 @@ func rewriteValueMIPS_OpLsh8x64(v *Value) bool {
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) >= 8) {
 			break
 		}
 		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
+		v.AuxInt = int32ToAuxInt(0)
 		return true
 	}
 	return false
@@ -1951,9 +1999,9 @@ func rewriteValueMIPS_OpLsh8x8(v *Value) bool {
 		v1.AddArg(y)
 		v0.AddArg2(x, v1)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = 0
+		v2.AuxInt = int32ToAuxInt(0)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
 		v4.AddArg(y)
 		v3.AddArg(v4)
@@ -4855,7 +4903,7 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 	// match: (Move [0] _ _ mem)
 	// result: mem
 	for {
-		if v.AuxInt != 0 {
+		if auxIntToInt64(v.AuxInt) != 0 {
 			break
 		}
 		mem := v_2
@@ -4865,7 +4913,7 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 	// match: (Move [1] dst src mem)
 	// result: (MOVBstore dst (MOVBUload src mem) mem)
 	for {
-		if v.AuxInt != 1 {
+		if auxIntToInt64(v.AuxInt) != 1 {
 			break
 		}
 		dst := v_0
@@ -4878,17 +4926,17 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 		return true
 	}
 	// match: (Move [2] {t} dst src mem)
-	// cond: t.(*types.Type).Alignment()%2 == 0
+	// cond: t.Alignment()%2 == 0
 	// result: (MOVHstore dst (MOVHUload src mem) mem)
 	for {
-		if v.AuxInt != 2 {
+		if auxIntToInt64(v.AuxInt) != 2 {
 			break
 		}
-		t := v.Aux
+		t := auxToType(v.Aux)
 		dst := v_0
 		src := v_1
 		mem := v_2
-		if !(t.(*types.Type).Alignment()%2 == 0) {
+		if !(t.Alignment()%2 == 0) {
 			break
 		}
 		v.reset(OpMIPSMOVHstore)
@@ -4900,16 +4948,16 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 	// match: (Move [2] dst src mem)
 	// result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
 	for {
-		if v.AuxInt != 2 {
+		if auxIntToInt64(v.AuxInt) != 2 {
 			break
 		}
 		dst := v_0
 		src := v_1
 		mem := v_2
 		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
-		v0.AuxInt = 1
+		v0.AuxInt = int32ToAuxInt(1)
 		v0.AddArg2(src, mem)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
 		v2.AddArg2(src, mem)
 		v1.AddArg3(dst, v2, mem)
 		v.AddArg3(dst, v0, v1)
 		return true
 	}
 	// match: (Move [4] {t} dst src mem)
-	// cond: t.(*types.Type).Alignment()%4 == 0
+	// cond: t.Alignment()%4 == 0
 	// result: (MOVWstore dst (MOVWload src mem) mem)
 	for {
-		if v.AuxInt != 4 {
+		if auxIntToInt64(v.AuxInt) != 4 {
 			break
 		}
-		t := v.Aux
+		t := auxToType(v.Aux)
 		dst := v_0
 		src := v_1
 		mem := v_2
-		if !(t.(*types.Type).Alignment()%4 == 0) {
+		if !(t.Alignment()%4 == 0) {
 			break
 		}
 		v.reset(OpMIPSMOVWstore)
@@ -4939,23 +4987,23 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 		return true
 	}
 	// match: (Move [4] {t} dst src mem)
-	// cond: t.(*types.Type).Alignment()%2 == 0
+	// cond: t.Alignment()%2 == 0
 	// result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
 	for {
-		if v.AuxInt != 4 {
+		if auxIntToInt64(v.AuxInt) != 4 {
 			break
 		}
-		t := v.Aux
+		t := auxToType(v.Aux)
 		dst := v_0
 		src := v_1
 		mem := v_2
-		if !(t.(*types.Type).Alignment()%2 == 0) {
+		if !(t.Alignment()%2 == 0) {
 			break
 		}
 		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = 2
+		v.AuxInt = int32ToAuxInt(2)
 		v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
-		v0.AuxInt = 2
+		v0.AuxInt = int32ToAuxInt(2)
 		v0.AddArg2(src, mem)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
@@ -4967,26 +5015,26 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 	// match: (Move [4] dst src mem)
 	// result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
 	for {
-		if v.AuxInt != 4 {
+		if auxIntToInt64(v.AuxInt) != 4 {
 			break
 		}
 		dst := v_0
 		src := v_1
 		mem := v_2
 		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = 3
+		v.AuxInt = int32ToAuxInt(3)
 		v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
-		v0.AuxInt = 3
+		v0.AuxInt = int32ToAuxInt(3)
 		v0.AddArg2(src, mem)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
-		v1.AuxInt = 2
+		v1.AuxInt = int32ToAuxInt(2)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
-		v2.AuxInt = 2
+		v2.AuxInt = int32ToAuxInt(2)
 		v2.AddArg2(src, mem)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
-		v3.AuxInt = 1
+		v3.AuxInt = int32ToAuxInt(1)
 		v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
-		v4.AuxInt = 1
+		v4.AuxInt = int32ToAuxInt(1)
 		v4.AddArg2(src, mem)
 		v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
 		v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
@@ -5000,21 +5048,21 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 	// match: (Move [3] dst src mem)
 	// result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
 	for {
-		if v.AuxInt != 3 {
+		if auxIntToInt64(v.AuxInt) != 3 {
 			break
 		}
 		dst := v_0
 		src := v_1
 		mem := v_2
 		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = 2
+		v.AuxInt = int32ToAuxInt(2)
 		v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
-		v0.AuxInt = 2
+		v0.AuxInt = int32ToAuxInt(2)
 		v0.AddArg2(src, mem)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
-		v1.AuxInt = 1
+		v1.AuxInt = int32ToAuxInt(1)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
-		v2.AuxInt = 1
+		v2.AuxInt = int32ToAuxInt(1)
 		v2.AddArg2(src, mem)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
 		v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
@@ -5025,23 +5073,23 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 		return true
 	}
 	// match: (Move [8] {t} dst src mem)
-	// cond: t.(*types.Type).Alignment()%4 == 0
+	// cond: t.Alignment()%4 == 0
 	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
 	for {
-		if v.AuxInt != 8 {
+		if auxIntToInt64(v.AuxInt) != 8 {
 			break
 		}
-		t := v.Aux
+		t := auxToType(v.Aux)
 		dst := v_0
 		src := v_1
 		mem := v_2
-		if !(t.(*types.Type).Alignment()%4 == 0) {
+		if !(t.Alignment()%4 == 0) {
 			break
 		}
 		v.reset(OpMIPSMOVWstore)
-		v.AuxInt = 4
+		v.AuxInt = int32ToAuxInt(4)
 		v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
-		v0.AuxInt = 4
+		v0.AuxInt = int32ToAuxInt(4)
 		v0.AddArg2(src, mem)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
 		v2.AddArg2(src, mem)
 		v1.AddArg3(dst, v2, mem)
 		v.AddArg3(dst, v0, v1)
 		return true
@@ -5051,33 +5099,33 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 	// match: (Move [8] {t} dst src mem)
-	// cond: t.(*types.Type).Alignment()%2 == 0
+	// cond: t.Alignment()%2 == 0
 	// result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
 	for {
-		if v.AuxInt != 8 {
+		if auxIntToInt64(v.AuxInt) != 8 {
 			break
 		}
-		t := v.Aux
+		t := auxToType(v.Aux)
 		dst := v_0
 		src := v_1
 		mem := v_2
-		if !(t.(*types.Type).Alignment()%2 == 0) {
+		if !(t.Alignment()%2 == 0) {
 			break
 		}
 		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = 6
+		v.AuxInt = int32ToAuxInt(6)
 		v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
-		v0.AuxInt = 6
+		v0.AuxInt = int32ToAuxInt(6)
 		v0.AddArg2(src, mem)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
-		v1.AuxInt = 4
+		v1.AuxInt = int32ToAuxInt(4)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
-		v2.AuxInt = 4
+		v2.AuxInt = int32ToAuxInt(4)
 		v2.AddArg2(src, mem)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
-		v3.AuxInt = 2
+		v3.AuxInt = int32ToAuxInt(2)
 		v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
-		v4.AuxInt = 2
+		v4.AuxInt = int32ToAuxInt(2)
 		v4.AddArg2(src, mem)
 		v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
 		v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
@@ -5089,28 +5137,28 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 		return true
 	}
 	// match: (Move [6] {t} dst src mem)
-	// cond: t.(*types.Type).Alignment()%2 == 0
+	// cond: t.Alignment()%2 == 0
 	// result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
 	for {
-		if v.AuxInt != 6 {
+		if auxIntToInt64(v.AuxInt) != 6 {
 			break
 		}
-		t := v.Aux
+		t := auxToType(v.Aux)
 		dst := v_0
 		src := v_1
 		mem := v_2
-		if !(t.(*types.Type).Alignment()%2 == 0) {
+		if !(t.Alignment()%2 == 0) {
 			break
 		}
 		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = 4
+		v.AuxInt = int32ToAuxInt(4)
 		v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
-		v0.AuxInt = 4
+		v0.AuxInt = int32ToAuxInt(4)
 		v0.AddArg2(src, mem)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
-		v1.AuxInt = 2
+		v1.AuxInt = int32ToAuxInt(2)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
-		v2.AuxInt = 2
+		v2.AuxInt = int32ToAuxInt(2)
 		v2.AddArg2(src, mem)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
 		v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
@@ -5121,28 +5169,28 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 		return true
 	}
 	// match: (Move [12] {t} dst src mem)
-	// cond: t.(*types.Type).Alignment()%4 == 0
+	// cond: t.Alignment()%4 == 0
 	// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
 	for {
-		if v.AuxInt != 12 {
+		if auxIntToInt64(v.AuxInt) != 12 {
 			break
 		}
-		t := v.Aux
+		t := auxToType(v.Aux)
 		dst := v_0
 		src := v_1
 		mem := v_2
-		if !(t.(*types.Type).Alignment()%4 == 0) {
+		if !(t.Alignment()%4 == 0) {
 			break
 		}
 		v.reset(OpMIPSMOVWstore)
-		v.AuxInt = 8
+		v.AuxInt = int32ToAuxInt(8)
 		v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
-		v0.AuxInt = 8
+		v0.AuxInt = int32ToAuxInt(8)
 		v0.AddArg2(src, mem)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
-		v1.AuxInt = 4
+		v1.AuxInt = int32ToAuxInt(4)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
-		v2.AuxInt = 4
+		v2.AuxInt = int32ToAuxInt(4)
 		v2.AddArg2(src, mem)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
 		v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
@@ -5153,33 +5201,33 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 		return true
 	}
 	// match: (Move [16] {t} dst src mem)
-	// cond: t.(*types.Type).Alignment()%4 == 0
+	// cond: t.Alignment()%4 == 0
 	// result: (MOVWstore [12] dst (MOVWload [12] src mem) (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))))
 	for {
-		if v.AuxInt != 16 {
+		if auxIntToInt64(v.AuxInt) != 16 {
 			break
 		}
-		t := v.Aux
+		t := auxToType(v.Aux)
 		dst := v_0
 		src := v_1
 		mem := v_2
-		if !(t.(*types.Type).Alignment()%4 == 0) {
+		if !(t.Alignment()%4 == 0) {
 			break
 		}
 		v.reset(OpMIPSMOVWstore)
-		v.AuxInt = 12
+		v.AuxInt = int32ToAuxInt(12)
 		v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
-		v0.AuxInt = 12
+		v0.AuxInt = int32ToAuxInt(12)
 		v0.AddArg2(src, mem)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
-		v1.AuxInt = 8
+		v1.AuxInt = int32ToAuxInt(8)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
-		v2.AuxInt = 8
+		v2.AuxInt = int32ToAuxInt(8)
 		v2.AddArg2(src, mem)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
-		v3.AuxInt = 4
+		v3.AuxInt = int32ToAuxInt(4)
 		v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
-		v4.AuxInt = 4
+		v4.AuxInt = int32ToAuxInt(4)
 		v4.AddArg2(src, mem)
 		v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
 		v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
@@ -5191,21 +5239,21 @@ func rewriteValueMIPS_OpMove(v *Value) bool {
 		return true
 	}
 	// match: (Move [s] {t} dst src mem)
-	// cond: (s > 16 && logLargeCopy(v, s) || t.(*types.Type).Alignment()%4 != 0)
-	// result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDconst src [s-moveSize(t.(*types.Type).Alignment(), config)]) mem)
+	// cond: (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0)
+	// result: (LoweredMove [int32(t.Alignment())] dst src (ADDconst src [int32(s-moveSize(t.Alignment(), config))]) mem)
 	for {
-		s := v.AuxInt
-		t := v.Aux
+		s := auxIntToInt64(v.AuxInt)
+		t := auxToType(v.Aux)
 		dst := v_0
 		src := v_1
 		mem := v_2
-		if !(s > 16 && logLargeCopy(v, s) || t.(*types.Type).Alignment()%4 != 0) {
+		if !(s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) {
 			break
 		}
 		v.reset(OpMIPSLoweredMove)
-		v.AuxInt = t.(*types.Type).Alignment()
+		v.AuxInt = int32ToAuxInt(int32(t.Alignment()))
 		v0 := b.NewValue0(v.Pos, OpMIPSADDconst, src.Type)
-		v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
+		v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
 		v0.AddArg(src)
 		v.AddArg4(dst, src, v0, mem)
 		return true
@@ -5230,7 +5278,7 @@ func rewriteValueMIPS_OpNeq16(v *Value) bool {
 		v2.AddArg(y)
 		v0.AddArg2(v1, v2)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v3.AuxInt = 0
+		v3.AuxInt = int32ToAuxInt(0)
 		v.AddArg2(v0, v3)
 		return true
 	}
@@ -5249,7 +5297,7 @@ func rewriteValueMIPS_OpNeq32(v *Value) bool {
 		v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
 		v0.AddArg2(x, y)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v1.AuxInt = 0
+		v1.AuxInt = int32ToAuxInt(0)
 		v.AddArg2(v0, v1)
 		return true
 	}
@@ -5304,7 +5352,7 @@ func rewriteValueMIPS_OpNeq8(v *Value) bool {
 		v2.AddArg(y)
 		v0.AddArg2(v1, v2)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v3.AuxInt = 0
+		v3.AuxInt = int32ToAuxInt(0)
 		v.AddArg2(v0, v3)
 		return true
 	}
@@ -5323,7 +5371,7 @@ func rewriteValueMIPS_OpNeqPtr(v *Value) bool {
 		v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
 		v0.AddArg2(x, y)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v1.AuxInt = 0
+		v1.AuxInt = int32ToAuxInt(0)
 		v.AddArg2(v0, v1)
 		return true
 	}
@@ -5335,7 +5383,7 @@ func rewriteValueMIPS_OpNot(v *Value) bool {
 	for {
 		x := v_0
 		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
+		v.AuxInt = int32ToAuxInt(1)
 		v.AddArg(x)
 		return true
 	}
@@ -5343,25 +5391,25 @@ func rewriteValueMIPS_OpNot(v *Value) bool {
 func rewriteValueMIPS_OpOffPtr(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (OffPtr [off] ptr:(SP))
-	// result: (MOVWaddr [off] ptr)
+	// result: (MOVWaddr [int32(off)] ptr)
 	for {
-		off := v.AuxInt
+		off := auxIntToInt64(v.AuxInt)
 		ptr := v_0
 		if ptr.Op != OpSP {
 			break
 		}
 		v.reset(OpMIPSMOVWaddr)
-		v.AuxInt = off
+		v.AuxInt = int32ToAuxInt(int32(off))
 		v.AddArg(ptr)
 		return true
 	}
 	// match: (OffPtr [off] ptr)
-	// result: (ADDconst [off] ptr)
+	// result: (ADDconst [int32(off)] ptr)
 	for {
-		off := v.AuxInt
+		off := auxIntToInt64(v.AuxInt)
 		ptr := v_0
 		v.reset(OpMIPSADDconst)
-		v.AuxInt = off
+		v.AuxInt = int32ToAuxInt(int32(off))
 		v.AddArg(ptr)
 		return true
 	}
@@ -5491,15 +5539,15 @@ func rewriteValueMIPS_OpRotateLeft16(v *Value) bool {
 		if v_1.Op != OpMIPSMOVWconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt32(v_1.AuxInt)
 		v.reset(OpOr16)
 		v0 := b.NewValue0(v.Pos, OpLsh16x32, t)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v1.AuxInt = c & 15
+		v1.AuxInt = int32ToAuxInt(c & 15)
 		v0.AddArg2(x, v1)
 		v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v3.AuxInt = -c & 15
+		v3.AuxInt = int32ToAuxInt(-c & 15)
 		v2.AddArg2(x, v3)
 		v.AddArg2(v0, v2)
 		return true
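One subtlety worth noting in the OffPtr rewrite above: the generic OffPtr op carries an int64 offset, while the MIPS MOVWaddr/ADDconst ops store a 32-bit AuxInt, so the typed rules make the narrowing explicit. On this 32-bit target every reachable offset is assumed to fit, so int32(off) is taken to be lossless:

    package main

    import "fmt"

    // (OffPtr [off] ptr) => (ADDconst [int32(off)] ptr): the conversion is
    // explicit in the typed rule; it would only wrap for offsets beyond 2GiB,
    // which cannot occur in a 32-bit address space.
    func main() {
        off := int64(1024)
        fmt.Println(int32(off)) // 1024
    }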
@@ -5519,15 +5567,15 @@ func rewriteValueMIPS_OpRotateLeft32(v *Value) bool {
 		if v_1.Op != OpMIPSMOVWconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt32(v_1.AuxInt)
 		v.reset(OpOr32)
 		v0 := b.NewValue0(v.Pos, OpLsh32x32, t)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v1.AuxInt = c & 31
+		v1.AuxInt = int32ToAuxInt(c & 31)
 		v0.AddArg2(x, v1)
 		v2 := b.NewValue0(v.Pos, OpRsh32Ux32, t)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v3.AuxInt = -c & 31
+		v3.AuxInt = int32ToAuxInt(-c & 31)
 		v2.AddArg2(x, v3)
 		v.AddArg2(v0, v2)
 		return true
@@ -5547,15 +5595,15 @@ func rewriteValueMIPS_OpRotateLeft64(v *Value) bool {
 		if v_1.Op != OpMIPSMOVWconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt32(v_1.AuxInt)
 		v.reset(OpOr64)
 		v0 := b.NewValue0(v.Pos, OpLsh64x32, t)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v1.AuxInt = c & 63
+		v1.AuxInt = int32ToAuxInt(c & 63)
 		v0.AddArg2(x, v1)
 		v2 := b.NewValue0(v.Pos, OpRsh64Ux32, t)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v3.AuxInt = -c & 63
+		v3.AuxInt = int32ToAuxInt(-c & 63)
 		v2.AddArg2(x, v3)
 		v.AddArg2(v0, v2)
 		return true
@@ -5575,15 +5623,15 @@ func rewriteValueMIPS_OpRotateLeft8(v *Value) bool {
 		if v_1.Op != OpMIPSMOVWconst {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt32(v_1.AuxInt)
 		v.reset(OpOr8)
 		v0 := b.NewValue0(v.Pos, OpLsh8x32, t)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v1.AuxInt = c & 7
+		v1.AuxInt = int32ToAuxInt(c & 7)
 		v0.AddArg2(x, v1)
 		v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v3.AuxInt = -c & 7
+		v3.AuxInt = int32ToAuxInt(-c & 7)
 		v2.AddArg2(x, v3)
 		v.AddArg2(v0, v2)
 		return true
@@ -5609,9 +5657,9 @@ func rewriteValueMIPS_OpRsh16Ux16(v *Value) bool {
 		v2.AddArg(y)
 		v0.AddArg2(v1, v2)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v3.AuxInt = 0
+		v3.AuxInt = int32ToAuxInt(0)
 		v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v4.AuxInt = 32
+		v4.AuxInt = int32ToAuxInt(32)
 		v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
 		v5.AddArg(y)
 		v4.AddArg(v5)
@@ -5636,9 +5684,9 @@ func rewriteValueMIPS_OpRsh16Ux32(v *Value) bool {
 		v1.AddArg(x)
 		v0.AddArg2(v1, y)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = 0
+		v2.AuxInt = int32ToAuxInt(0)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v3.AddArg(y)
 		v.AddArg3(v0, v2, v3)
 		return true
@@ -5651,20 +5699,20 @@ func rewriteValueMIPS_OpRsh16Ux64(v *Value) bool {
 	typ := &b.Func.Config.Types
 	// match: (Rsh16Ux64 x (Const64 [c]))
 	// cond: uint32(c) < 16
-	// result: (SRLconst (SLLconst x [16]) [c+16])
+	// result: (SRLconst (SLLconst x [16]) [int32(c+16)])
 	for {
 		x := v_0
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) < 16) {
 			break
 		}
 		v.reset(OpMIPSSRLconst)
-		v.AuxInt = c + 16
+		v.AuxInt = int32ToAuxInt(int32(c + 16))
 		v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
-		v0.AuxInt = 16
+		v0.AuxInt = int32ToAuxInt(16)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -5676,12 +5724,12 @@ func rewriteValueMIPS_OpRsh16Ux64(v *Value) bool {
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) >= 16) {
 			break
 		}
 		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
+		v.AuxInt = int32ToAuxInt(0)
 		return true
 	}
 	return false
@@ -5705,9 +5753,9 @@ func rewriteValueMIPS_OpRsh16Ux8(v *Value) bool {
 		v2.AddArg(y)
 		v0.AddArg2(v1, v2)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v3.AuxInt = 0
+		v3.AuxInt = int32ToAuxInt(0)
 		v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v4.AuxInt = 32
+		v4.AuxInt = int32ToAuxInt(32)
 		v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
 		v5.AddArg(y)
 		v4.AddArg(v5)
@@ -5732,9 +5780,9 @@ func rewriteValueMIPS_OpRsh16x16(v *Value) bool {
 		v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
 		v2.AddArg(y)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v3.AuxInt = -1
+		v3.AuxInt = int32ToAuxInt(-1)
 		v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v4.AuxInt = 32
+		v4.AuxInt = int32ToAuxInt(32)
 		v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
 		v5.AddArg(y)
 		v4.AddArg(v5)
@@ -5758,9 +5806,9 @@ func rewriteValueMIPS_OpRsh16x32(v *Value) bool {
 		v0.AddArg(x)
 		v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = -1
+		v2.AuxInt = int32ToAuxInt(-1)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v3.AddArg(y)
 		v1.AddArg3(y, v2, v3)
 		v.AddArg2(v0, v1)
@@ -5774,20 +5822,20 @@ func rewriteValueMIPS_OpRsh16x64(v *Value) bool {
 	typ := &b.Func.Config.Types
 	// match: (Rsh16x64 x (Const64 [c]))
 	// cond: uint32(c) < 16
-	// result: (SRAconst (SLLconst x [16]) [c+16])
+	// result: (SRAconst (SLLconst x [16]) [int32(c+16)])
 	for {
 		x := v_0
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) < 16) {
 			break
 		}
 		v.reset(OpMIPSSRAconst)
-		v.AuxInt = c + 16
+		v.AuxInt = int32ToAuxInt(int32(c + 16))
 		v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
-		v0.AuxInt = 16
+		v0.AuxInt = int32ToAuxInt(16)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -5800,14 +5848,14 @@ func rewriteValueMIPS_OpRsh16x64(v *Value) bool {
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) >= 16) {
 			break
 		}
 		v.reset(OpMIPSSRAconst)
-		v.AuxInt = 31
+		v.AuxInt = int32ToAuxInt(31)
 		v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
-		v0.AuxInt = 16
+		v0.AuxInt = int32ToAuxInt(16)
 		v0.AddArg(x)
 		v.AddArg(v0)
 		return true
@@ -5831,9 +5879,9 @@ func rewriteValueMIPS_OpRsh16x8(v *Value) bool {
 		v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
 		v2.AddArg(y)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v3.AuxInt = -1
+		v3.AuxInt = int32ToAuxInt(-1)
 		v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v4.AuxInt = 32
+		v4.AuxInt = int32ToAuxInt(32)
 		v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
 		v5.AddArg(y)
 		v4.AddArg(v5)
@@ -5859,9 +5907,9 @@ func rewriteValueMIPS_OpRsh32Ux16(v *Value) bool {
 		v1.AddArg(y)
 		v0.AddArg2(x, v1)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = 0
+		v2.AuxInt = int32ToAuxInt(0)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
 		v4.AddArg(y)
 		v3.AddArg(v4)
@@ -5884,9 +5932,9 @@ func rewriteValueMIPS_OpRsh32Ux32(v *Value) bool {
 		v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
 		v0.AddArg2(x, y)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v1.AuxInt = 0
+		v1.AuxInt = int32ToAuxInt(0)
 		v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v2.AuxInt = 32
+		v2.AuxInt = int32ToAuxInt(32)
 		v2.AddArg(y)
 		v.AddArg3(v0, v1, v2)
 		return true
@@ -5897,18 +5945,18 @@ func rewriteValueMIPS_OpRsh32Ux64(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Rsh32Ux64 x (Const64 [c]))
 	// cond: uint32(c) < 32
-	// result: (SRLconst x [c])
+	// result: (SRLconst x [int32(c)])
 	for {
 		x := v_0
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) < 32) {
 			break
 		}
 		v.reset(OpMIPSSRLconst)
-		v.AuxInt = c
+		v.AuxInt = int32ToAuxInt(int32(c))
 		v.AddArg(x)
 		return true
 	}
@@ -5919,12 +5967,12 @@ func rewriteValueMIPS_OpRsh32Ux64(v *Value) bool {
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) >= 32) {
 			break
 		}
 		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
+		v.AuxInt = int32ToAuxInt(0)
 		return true
 	}
 	return false
@@ -5946,9 +5994,9 @@ func rewriteValueMIPS_OpRsh32Ux8(v *Value) bool {
 		v1.AddArg(y)
 		v0.AddArg2(x, v1)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = 0
+		v2.AuxInt = int32ToAuxInt(0)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
 		v4.AddArg(y)
 		v3.AddArg(v4)
@@ -5971,9 +6019,9 @@ func rewriteValueMIPS_OpRsh32x16(v *Value) bool {
 		v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
 		v1.AddArg(y)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = -1
+		v2.AuxInt = int32ToAuxInt(-1)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
 		v4.AddArg(y)
 		v3.AddArg(v4)
@@ -5995,9 +6043,9 @@ func rewriteValueMIPS_OpRsh32x32(v *Value) bool {
 		v.reset(OpMIPSSRA)
 		v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
 		v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v1.AuxInt = -1
+		v1.AuxInt = int32ToAuxInt(-1)
 		v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v2.AuxInt = 32
+		v2.AuxInt = int32ToAuxInt(32)
 		v2.AddArg(y)
 		v0.AddArg3(y, v1, v2)
 		v.AddArg2(x, v0)
@@ -6009,18 +6057,18 @@ func rewriteValueMIPS_OpRsh32x64(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Rsh32x64 x (Const64 [c]))
 	// cond: uint32(c) < 32
-	// result: (SRAconst x [c])
+	// result: (SRAconst x [int32(c)])
 	for {
 		x := v_0
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) < 32) {
 			break
 		}
 		v.reset(OpMIPSSRAconst)
-		v.AuxInt = c
+		v.AuxInt = int32ToAuxInt(int32(c))
 		v.AddArg(x)
 		return true
 	}
@@ -6032,12 +6080,12 @@ func rewriteValueMIPS_OpRsh32x64(v *Value) bool {
 		if v_1.Op != OpConst64 {
 			break
 		}
-		c := v_1.AuxInt
+		c := auxIntToInt64(v_1.AuxInt)
 		if !(uint32(c) >= 32) {
 			break
 		}
 		v.reset(OpMIPSSRAconst)
-		v.AuxInt = 31
+		v.AuxInt = int32ToAuxInt(31)
 		v.AddArg(x)
 		return true
 	}
@@ -6058,9 +6106,9 @@ func rewriteValueMIPS_OpRsh32x8(v *Value) bool {
 		v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
 		v1.AddArg(y)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = -1
+		v2.AuxInt = int32ToAuxInt(-1)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
 		v4.AddArg(y)
 		v3.AddArg(v4)
@@ -6088,9 +6136,9 @@ func rewriteValueMIPS_OpRsh8Ux16(v *Value) bool {
 		v2.AddArg(y)
 		v0.AddArg2(v1, v2)
 		v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v3.AuxInt = 0
+		v3.AuxInt = int32ToAuxInt(0)
 		v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v4.AuxInt = 32
+		v4.AuxInt = int32ToAuxInt(32)
 		v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
 		v5.AddArg(y)
 		v4.AddArg(v5)
@@ -6115,9 +6163,9 @@ func rewriteValueMIPS_OpRsh8Ux32(v *Value) bool {
 		v1.AddArg(x)
 		v0.AddArg2(v1, y)
 		v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
-		v2.AuxInt = 0
+		v2.AuxInt = int32ToAuxInt(0)
 		v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
-		v3.AuxInt = 32
+		v3.AuxInt = int32ToAuxInt(32)
 		v3.AddArg(y)
 		v.AddArg3(v0, v2, v3)
 		return true
@@ -6130,20 +6178,20 @@ func rewriteValueMIPS_OpRsh8Ux64(v *Value) bool {
 	typ := &b.Func.Config.Types
 	// match:
(Rsh8Ux64 x (Const64 [c])) // cond: uint32(c) < 8 - // result: (SRLconst (SLLconst x [24]) [c+24]) + // result: (SRLconst (SLLconst x [24]) [int32(c+24)]) for { x := v_0 if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint32(c) < 8) { break } v.reset(OpMIPSSRLconst) - v.AuxInt = c + 24 + v.AuxInt = int32ToAuxInt(int32(c + 24)) v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) - v0.AuxInt = 24 + v0.AuxInt = int32ToAuxInt(24) v0.AddArg(x) v.AddArg(v0) return true @@ -6155,12 +6203,12 @@ func rewriteValueMIPS_OpRsh8Ux64(v *Value) bool { if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint32(c) >= 8) { break } v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } return false @@ -6184,9 +6232,9 @@ func rewriteValueMIPS_OpRsh8Ux8(v *Value) bool { v2.AddArg(y) v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v3.AuxInt = 0 + v3.AuxInt = int32ToAuxInt(0) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) - v4.AuxInt = 32 + v4.AuxInt = int32ToAuxInt(32) v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) @@ -6211,9 +6259,9 @@ func rewriteValueMIPS_OpRsh8x16(v *Value) bool { v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v3.AuxInt = -1 + v3.AuxInt = int32ToAuxInt(-1) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) - v4.AuxInt = 32 + v4.AuxInt = int32ToAuxInt(32) v5 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) @@ -6237,9 +6285,9 @@ func rewriteValueMIPS_OpRsh8x32(v *Value) bool { v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v2.AuxInt = -1 + v2.AuxInt = int32ToAuxInt(-1) v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) - v3.AuxInt = 32 + v3.AuxInt = int32ToAuxInt(32) v3.AddArg(y) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) @@ -6253,20 +6301,20 @@ func rewriteValueMIPS_OpRsh8x64(v *Value) bool { typ := &b.Func.Config.Types // match: (Rsh8x64 x (Const64 [c])) // cond: uint32(c) < 8 - // result: (SRAconst (SLLconst x [24]) [c+24]) + // result: (SRAconst (SLLconst x [24]) [int32(c+24)]) for { x := v_0 if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint32(c) < 8) { break } v.reset(OpMIPSSRAconst) - v.AuxInt = c + 24 + v.AuxInt = int32ToAuxInt(int32(c + 24)) v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) - v0.AuxInt = 24 + v0.AuxInt = int32ToAuxInt(24) v0.AddArg(x) v.AddArg(v0) return true @@ -6279,14 +6327,14 @@ func rewriteValueMIPS_OpRsh8x64(v *Value) bool { if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint32(c) >= 8) { break } v.reset(OpMIPSSRAconst) - v.AuxInt = 31 + v.AuxInt = int32ToAuxInt(31) v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32) - v0.AuxInt = 24 + v0.AuxInt = int32ToAuxInt(24) v0.AddArg(x) v.AddArg(v0) return true @@ -6310,9 +6358,9 @@ func rewriteValueMIPS_OpRsh8x8(v *Value) bool { v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v2.AddArg(y) v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v3.AuxInt = -1 + v3.AuxInt = int32ToAuxInt(-1) v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool) - v4.AuxInt = 32 + v4.AuxInt = int32ToAuxInt(32) v5 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v5.AddArg(y) v4.AddArg(v5) @@ -6703,7 +6751,7 @@ func rewriteValueMIPS_OpSignmask(v *Value) bool { for { x := v_0 v.reset(OpMIPSSRAconst) - 
v.AuxInt = 31 + v.AuxInt = int32ToAuxInt(31) v.AddArg(x) return true } @@ -6717,7 +6765,7 @@ func rewriteValueMIPS_OpSlicemask(v *Value) bool { t := v.Type x := v_0 v.reset(OpMIPSSRAconst) - v.AuxInt = 31 + v.AuxInt = int32ToAuxInt(31) v0 := b.NewValue0(v.Pos, OpMIPSNEG, t) v0.AddArg(x) v.AddArg(v0) @@ -6729,14 +6777,14 @@ func rewriteValueMIPS_OpStore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 1 + // cond: t.Size() == 1 // result: (MOVBstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 1) { + if !(t.Size() == 1) { break } v.reset(OpMIPSMOVBstore) @@ -6744,14 +6792,14 @@ func rewriteValueMIPS_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 2 + // cond: t.Size() == 2 // result: (MOVHstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 2) { + if !(t.Size() == 2) { break } v.reset(OpMIPSMOVHstore) @@ -6759,14 +6807,14 @@ func rewriteValueMIPS_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) + // cond: t.Size() == 4 && !is32BitFloat(val.Type) // result: (MOVWstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { + if !(t.Size() == 4 && !is32BitFloat(val.Type)) { break } v.reset(OpMIPSMOVWstore) @@ -6774,14 +6822,14 @@ func rewriteValueMIPS_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) + // cond: t.Size() == 4 && is32BitFloat(val.Type) // result: (MOVFstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { + if !(t.Size() == 4 && is32BitFloat(val.Type)) { break } v.reset(OpMIPSMOVFstore) @@ -6789,14 +6837,14 @@ func rewriteValueMIPS_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) + // cond: t.Size() == 8 && is64BitFloat(val.Type) // result: (MOVDstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { + if !(t.Size() == 8 && is64BitFloat(val.Type)) { break } v.reset(OpMIPSMOVDstore) @@ -6833,7 +6881,7 @@ func rewriteValueMIPS_OpZero(v *Value) bool { // match: (Zero [0] _ mem) // result: mem for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } mem := v_1 @@ -6843,96 +6891,96 @@ func rewriteValueMIPS_OpZero(v *Value) bool { // match: (Zero [1] ptr mem) // result: (MOVBstore ptr (MOVWconst [0]) mem) for { - if v.AuxInt != 1 { + if auxIntToInt64(v.AuxInt) != 1 { break } ptr := v_0 mem := v_1 v.reset(OpMIPSMOVBstore) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore ptr (MOVWconst [0]) mem) for { - if v.AuxInt != 2 { + if auxIntToInt64(v.AuxInt) != 2 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } 
v.reset(OpMIPSMOVHstore) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)) for { - if v.AuxInt != 2 { + if auxIntToInt64(v.AuxInt) != 2 { break } ptr := v_0 mem := v_1 v.reset(OpMIPSMOVBstore) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) - v1.AuxInt = 0 + v1.AuxInt = int32ToAuxInt(0) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v1.AddArg3(ptr, v2, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [4] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%4 == 0 + // cond: t.Alignment()%4 == 0 // result: (MOVWstore ptr (MOVWconst [0]) mem) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%4 == 0) { + if !(t.Alignment()%4 == 0) { break } v.reset(OpMIPSMOVWstore) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpMIPSMOVHstore) - v.AuxInt = 2 + v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) - v1.AuxInt = 0 + v1.AuxInt = int32ToAuxInt(0) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v1.AddArg3(ptr, v2, mem) v.AddArg3(ptr, v0, v1) return true @@ -6940,27 +6988,27 @@ func rewriteValueMIPS_OpZero(v *Value) bool { // match: (Zero [4] ptr mem) // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } ptr := v_0 mem := v_1 v.reset(OpMIPSMOVBstore) - v.AuxInt = 3 + v.AuxInt = int32ToAuxInt(3) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) - v1.AuxInt = 2 + v1.AuxInt = int32ToAuxInt(2) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) - v3.AuxInt = 1 + v3.AuxInt = int32ToAuxInt(1) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v4.AuxInt = 0 + v4.AuxInt = int32ToAuxInt(0) v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) - v5.AuxInt = 0 + v5.AuxInt = int32ToAuxInt(0) v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v6.AuxInt = 0 + v6.AuxInt = int32ToAuxInt(0) v5.AddArg3(ptr, v6, mem) v3.AddArg3(ptr, v4, v5) v1.AddArg3(ptr, v2, v3) @@ -6970,142 +7018,142 @@ func rewriteValueMIPS_OpZero(v *Value) bool { // match: (Zero [3] ptr mem) // result: (MOVBstore [2] ptr (MOVWconst [0]) 
(MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))) for { - if v.AuxInt != 3 { + if auxIntToInt64(v.AuxInt) != 3 { break } ptr := v_0 mem := v_1 v.reset(OpMIPSMOVBstore) - v.AuxInt = 2 + v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) - v1.AuxInt = 1 + v1.AuxInt = int32ToAuxInt(1) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem) - v3.AuxInt = 0 + v3.AuxInt = int32ToAuxInt(0) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v4.AuxInt = 0 + v4.AuxInt = int32ToAuxInt(0) v3.AddArg3(ptr, v4, mem) v1.AddArg3(ptr, v2, v3) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [6] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore [4] ptr (MOVWconst [0]) (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))) for { - if v.AuxInt != 6 { + if auxIntToInt64(v.AuxInt) != 6 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpMIPSMOVHstore) - v.AuxInt = 4 + v.AuxInt = int32ToAuxInt(4) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) - v1.AuxInt = 2 + v1.AuxInt = int32ToAuxInt(2) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem) - v3.AuxInt = 0 + v3.AuxInt = int32ToAuxInt(0) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v4.AuxInt = 0 + v4.AuxInt = int32ToAuxInt(0) v3.AddArg3(ptr, v4, mem) v1.AddArg3(ptr, v2, v3) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [8] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%4 == 0 + // cond: t.Alignment()%4 == 0 // result: (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem)) for { - if v.AuxInt != 8 { + if auxIntToInt64(v.AuxInt) != 8 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%4 == 0) { + if !(t.Alignment()%4 == 0) { break } v.reset(OpMIPSMOVWstore) - v.AuxInt = 4 + v.AuxInt = int32ToAuxInt(4) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) - v1.AuxInt = 0 + v1.AuxInt = int32ToAuxInt(0) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v1.AddArg3(ptr, v2, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [12] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%4 == 0 + // cond: t.Alignment()%4 == 0 // result: (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))) for { - if v.AuxInt != 12 { + if auxIntToInt64(v.AuxInt) != 12 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%4 == 0) { + if !(t.Alignment()%4 == 0) { break } v.reset(OpMIPSMOVWstore) - v.AuxInt = 8 + v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) - v1.AuxInt = 4 + v1.AuxInt = int32ToAuxInt(4) v2 := b.NewValue0(v.Pos, 
OpMIPSMOVWconst, typ.UInt32) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) - v3.AuxInt = 0 + v3.AuxInt = int32ToAuxInt(0) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v4.AuxInt = 0 + v4.AuxInt = int32ToAuxInt(0) v3.AddArg3(ptr, v4, mem) v1.AddArg3(ptr, v2, v3) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [16] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%4 == 0 + // cond: t.Alignment()%4 == 0 // result: (MOVWstore [12] ptr (MOVWconst [0]) (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem)))) for { - if v.AuxInt != 16 { + if auxIntToInt64(v.AuxInt) != 16 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%4 == 0) { + if !(t.Alignment()%4 == 0) { break } v.reset(OpMIPSMOVWstore) - v.AuxInt = 12 + v.AuxInt = int32ToAuxInt(12) v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) - v1.AuxInt = 8 + v1.AuxInt = int32ToAuxInt(8) v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v2.AuxInt = 0 + v2.AuxInt = int32ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) - v3.AuxInt = 4 + v3.AuxInt = int32ToAuxInt(4) v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v4.AuxInt = 0 + v4.AuxInt = int32ToAuxInt(0) v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem) - v5.AuxInt = 0 + v5.AuxInt = int32ToAuxInt(0) v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v6.AuxInt = 0 + v6.AuxInt = int32ToAuxInt(0) v5.AddArg3(ptr, v6, mem) v3.AddArg3(ptr, v4, v5) v1.AddArg3(ptr, v2, v3) @@ -7113,20 +7161,20 @@ func rewriteValueMIPS_OpZero(v *Value) bool { return true } // match: (Zero [s] {t} ptr mem) - // cond: (s > 16 || t.(*types.Type).Alignment()%4 != 0) - // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADDconst ptr [s-moveSize(t.(*types.Type).Alignment(), config)]) mem) + // cond: (s > 16 || t.Alignment()%4 != 0) + // result: (LoweredZero [int32(t.Alignment())] ptr (ADDconst ptr [int32(s-moveSize(t.Alignment(), config))]) mem) for { - s := v.AuxInt - t := v.Aux + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(s > 16 || t.(*types.Type).Alignment()%4 != 0) { + if !(s > 16 || t.Alignment()%4 != 0) { break } v.reset(OpMIPSLoweredZero) - v.AuxInt = t.(*types.Type).Alignment() + v.AuxInt = int32ToAuxInt(int32(t.Alignment())) v0 := b.NewValue0(v.Pos, OpMIPSADDconst, ptr.Type) - v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) + v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config))) v0.AddArg(ptr) v.AddArg3(ptr, v0, mem) return true @@ -7144,7 +7192,7 @@ func rewriteValueMIPS_OpZeromask(v *Value) bool { v.reset(OpMIPSNEG) v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool) v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32) - v1.AuxInt = 0 + v1.AuxInt = int32ToAuxInt(0) v0.AddArg2(x, v1) v.AddArg(v0) return true -- 2.48.1
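
Note for readers unfamiliar with the typed-aux migration: the generated code above leans on small conversion helpers (auxIntToInt32, auxIntToInt64, int32ToAuxInt, auxToType) defined in src/cmd/compile/internal/ssa/rewrite.go, which give each rule a typed view of the raw AuxInt (int64) and Aux (interface{}) slots on a Value. The standalone sketch below is illustrative only; the Type struct and main function are invented for the example, and only the four helper shapes mirror what the real file provides.

package main

import "fmt"

// Type is a stand-in for cmd/compile/internal/types.Type,
// invented here so the sketch compiles on its own.
type Type struct{ size int64 }

func (t *Type) Size() int64 { return t.size }

// Typed-aux helper shapes: thin conversions between the raw
// int64/interface{} storage and the type a rule declares.
func auxIntToInt32(i int64) int32   { return int32(i) }
func auxIntToInt64(i int64) int64   { return i }
func int32ToAuxInt(i int32) int64   { return int64(i) }
func auxToType(i interface{}) *Type { return i.(*Type) }

func main() {
	auxInt := int32ToAuxInt(31)                   // what v.AuxInt = int32ToAuxInt(31) stores
	fmt.Println(auxIntToInt32(auxInt))            // what auxIntToInt32(v.AuxInt) reads back: 31
	fmt.Println(auxToType(&Type{size: 4}).Size()) // what the t.Size() == 4 conditions inspect: 4
}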