(AtomicCompareAndSwap32 ...) => (LoweredAtomicCas ...)
-// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
-(AtomicOr8 ptr val mem) && !config.BigEndian ->
+// AtomicOr8(ptr,val) => LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
+(AtomicOr8 ptr val mem) && !config.BigEndian =>
(LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
	(SLL <typ.UInt32> (ZeroExt8to32 val)
		(SLLconst <typ.UInt32> [3]
			(ANDconst <typ.UInt32> [3] ptr))) mem)
-// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8))))
-(AtomicAnd8 ptr val mem) && !config.BigEndian ->
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8))))
+// NOTE(review): the NORconst [0] term computes ^(0xFF << shift), the mask that
+// keeps the three untouched bytes of the word at 1 so AND only affects one byte.
+(AtomicAnd8 ptr val mem) && !config.BigEndian =>
(LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
	(OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
		(SLLconst <typ.UInt32> [3]
			(ANDconst <typ.UInt32> [3] ptr)))
	(NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
		(MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
			(ANDconst <typ.UInt32> [3] ptr))))) mem)
-// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
-(AtomicOr8 ptr val mem) && config.BigEndian ->
+// AtomicOr8(ptr,val) => LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
+(AtomicOr8 ptr val mem) && config.BigEndian =>
(LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
	(SLL <typ.UInt32> (ZeroExt8to32 val)
		(SLLconst <typ.UInt32> [3]
			(ANDconst <typ.UInt32> [3]
				(XORconst <typ.UInt32> [3] ptr)))) mem)
-// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8))))
-(AtomicAnd8 ptr val mem) && config.BigEndian ->
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8))))
+// NOTE(review): big-endian variant — ptr^3 flips the byte index so the shift
+// selects the same byte as the little-endian rule; NORconst builds the keep-mask.
+(AtomicAnd8 ptr val mem) && config.BigEndian =>
(LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
	(OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
		(SLLconst <typ.UInt32> [3]
			(ANDconst <typ.UInt32> [3]
				(XORconst <typ.UInt32> [3] ptr))))
	(NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
		(MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
			(ANDconst <typ.UInt32> [3]
				(XORconst <typ.UInt32> [3] ptr)))))) mem)
// checks
-(NilCheck ...) -> (LoweredNilCheck ...)
-(IsNonNil ptr) -> (SGTU ptr (MOVWconst [0]))
-(IsInBounds idx len) -> (SGTU len idx)
-(IsSliceInBounds idx len) -> (XORconst [1] (SGTU idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (SGTU ptr (MOVWconst [0]))
+(IsInBounds idx len) => (SGTU len idx)
+(IsSliceInBounds idx len) => (XORconst [1] (SGTU idx len))
// pseudo-ops
-(GetClosurePtr ...) -> (LoweredGetClosurePtr ...)
-(GetCallerSP ...) -> (LoweredGetCallerSP ...)
-(GetCallerPC ...) -> (LoweredGetCallerPC ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
-(If cond yes no) -> (NE cond yes no)
+(If cond yes no) => (NE cond yes no)
// Write barrier.
-(WB ...) -> (LoweredWB ...)
+(WB ...) => (LoweredWB ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 -> (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
-(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 -> (LoweredPanicExtendA [kind] hi lo y mem)
-(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 -> (LoweredPanicExtendB [kind] hi lo y mem)
-(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 -> (LoweredPanicExtendC [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
// Optimizations
// Absorb boolean tests into block
-(NE (FPFlagTrue cmp) yes no) -> (FPT cmp yes no)
-(NE (FPFlagFalse cmp) yes no) -> (FPF cmp yes no)
-(EQ (FPFlagTrue cmp) yes no) -> (FPF cmp yes no)
-(EQ (FPFlagFalse cmp) yes no) -> (FPT cmp yes no)
-(NE (XORconst [1] cmp:(SGT _ _)) yes no) -> (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTU _ _)) yes no) -> (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTconst _)) yes no) -> (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTUconst _)) yes no) -> (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTzero _)) yes no) -> (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTUzero _)) yes no) -> (EQ cmp yes no)
-(EQ (XORconst [1] cmp:(SGT _ _)) yes no) -> (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) -> (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTconst _)) yes no) -> (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) -> (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTzero _)) yes no) -> (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTUzero _)) yes no) -> (NE cmp yes no)
-(NE (SGTUconst [1] x) yes no) -> (EQ x yes no)
-(EQ (SGTUconst [1] x) yes no) -> (NE x yes no)
-(NE (SGTUzero x) yes no) -> (NE x yes no)
-(EQ (SGTUzero x) yes no) -> (EQ x yes no)
-(NE (SGTconst [0] x) yes no) -> (LTZ x yes no)
-(EQ (SGTconst [0] x) yes no) -> (GEZ x yes no)
-(NE (SGTzero x) yes no) -> (GTZ x yes no)
-(EQ (SGTzero x) yes no) -> (LEZ x yes no)
+// NOTE(review): (XORconst [1] cmp) negates a 0/1 comparison result, so a
+// negated condition just flips the block kind (NE <-> EQ) instead of emitting XOR.
+(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
+(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
+(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTzero _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUzero _)) yes no) => (EQ cmp yes no)
+(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTzero _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUzero _)) yes no) => (NE cmp yes no)
+(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
+(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
+(NE (SGTUzero x) yes no) => (NE x yes no)
+(EQ (SGTUzero x) yes no) => (EQ x yes no)
+(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
+(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
+(NE (SGTzero x) yes no) => (GTZ x yes no)
+(EQ (SGTzero x) yes no) => (LEZ x yes no)
// fold offset into address
-(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) -> (MOVWaddr [off1+off2] {sym} ptr)
+(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off1+off2] {sym} ptr)
// fold address into load/store
-(MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBload [off1+off2] {sym} ptr mem)
-(MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBUload [off1+off2] {sym} ptr mem)
-(MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHload [off1+off2] {sym} ptr mem)
-(MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHUload [off1+off2] {sym} ptr mem)
-(MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVWload [off1+off2] {sym} ptr mem)
-(MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVFload [off1+off2] {sym} ptr mem)
-(MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVDload [off1+off2] {sym} ptr mem)
-
-(MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBstore [off1+off2] {sym} ptr val mem)
-(MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHstore [off1+off2] {sym} ptr val mem)
-(MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVWstore [off1+off2] {sym} ptr val mem)
-(MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVFstore [off1+off2] {sym} ptr val mem)
-(MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVDstore [off1+off2] {sym} ptr val mem)
-
-(MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
-(MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
-(MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVWstorezero [off1+off2] {sym} ptr mem)
-
-(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
- (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
- (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
- (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
- (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
- (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
- (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
- (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-
-(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
- (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
- (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
- (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
- (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
- (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
- (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
- (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
- (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+// NOTE(review): with typed aux, off1/off2 are int32, so they are widened to
+// int64 before the is16Bit range check.
+(MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBload [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBUload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHload [off1+off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHUload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWload [off1+off2] {sym} ptr mem)
+(MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVFload [off1+off2] {sym} ptr mem)
+(MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVDload [off1+off2] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVFstore [off1+off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVDstore [off1+off2] {sym} ptr val mem)
+
+(MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBstorezero [off1+off2] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHstorezero [off1+off2] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstorezero [off1+off2] {sym} ptr mem)
+
+(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVFload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
+(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVFstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
-(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x)
-(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x)
-(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x)
-(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x)
-(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
-(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
-(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x)
+(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBUreg x)
+(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHreg x)
+(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHUreg x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
// store zero
-(MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
-(MOVHstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
-(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
// don't extend after proper load
-(MOVBreg x:(MOVBload _ _)) -> (MOVWreg x)
-(MOVBUreg x:(MOVBUload _ _)) -> (MOVWreg x)
-(MOVHreg x:(MOVBload _ _)) -> (MOVWreg x)
-(MOVHreg x:(MOVBUload _ _)) -> (MOVWreg x)
-(MOVHreg x:(MOVHload _ _)) -> (MOVWreg x)
-(MOVHUreg x:(MOVBUload _ _)) -> (MOVWreg x)
-(MOVHUreg x:(MOVHUload _ _)) -> (MOVWreg x)
+(MOVBreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVWreg x)
// fold double extensions
-(MOVBreg x:(MOVBreg _)) -> (MOVWreg x)
-(MOVBUreg x:(MOVBUreg _)) -> (MOVWreg x)
-(MOVHreg x:(MOVBreg _)) -> (MOVWreg x)
-(MOVHreg x:(MOVBUreg _)) -> (MOVWreg x)
-(MOVHreg x:(MOVHreg _)) -> (MOVWreg x)
-(MOVHUreg x:(MOVBUreg _)) -> (MOVWreg x)
-(MOVHUreg x:(MOVHUreg _)) -> (MOVWreg x)
+(MOVBreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVWreg x)
// sign extended loads
// Note: The combined instruction must end up in the same block
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values. See test/atomicload.go.
-(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <t> [off] {sym} ptr mem)
-(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBUload <t> [off] {sym} ptr mem)
-(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHload <t> [off] {sym} ptr mem)
-(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
+(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
+(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
// fold extensions and ANDs together
-(MOVBUreg (ANDconst [c] x)) -> (ANDconst [c&0xff] x)
-(MOVHUreg (ANDconst [c] x)) -> (ANDconst [c&0xffff] x)
-(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c&0x7f] x)
-(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c&0x7fff] x)
+(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)
+(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&0xffff] x)
+(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 => (ANDconst [c&0x7f] x)
+(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 => (ANDconst [c&0x7fff] x)
// don't extend before store
-(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVBstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
-(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
-(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
-(MOVHstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
-(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
// if a register move has only 1 use, just use the same register without emitting instruction
// MOVWnop doesn't emit instruction, only for ensuring the type.
-(MOVWreg x) && x.Uses == 1 -> (MOVWnop x)
+(MOVWreg x) && x.Uses == 1 => (MOVWnop x)
// fold constant into arithmatic ops
-(ADD x (MOVWconst [c])) -> (ADDconst [c] x)
-(SUB x (MOVWconst [c])) -> (SUBconst [c] x)
-(AND x (MOVWconst [c])) -> (ANDconst [c] x)
-(OR x (MOVWconst [c])) -> (ORconst [c] x)
-(XOR x (MOVWconst [c])) -> (XORconst [c] x)
-(NOR x (MOVWconst [c])) -> (NORconst [c] x)
-
-(SLL _ (MOVWconst [c])) && uint32(c)>=32 -> (MOVWconst [0])
-(SRL _ (MOVWconst [c])) && uint32(c)>=32 -> (MOVWconst [0])
-(SRA x (MOVWconst [c])) && uint32(c)>=32 -> (SRAconst x [31])
-(SLL x (MOVWconst [c])) -> (SLLconst x [c])
-(SRL x (MOVWconst [c])) -> (SRLconst x [c])
-(SRA x (MOVWconst [c])) -> (SRAconst x [c])
-
-(SGT (MOVWconst [c]) x) -> (SGTconst [c] x)
-(SGTU (MOVWconst [c]) x) -> (SGTUconst [c] x)
-(SGT x (MOVWconst [0])) -> (SGTzero x)
-(SGTU x (MOVWconst [0])) -> (SGTUzero x)
+(ADD x (MOVWconst [c])) => (ADDconst [c] x)
+(SUB x (MOVWconst [c])) => (SUBconst [c] x)
+(AND x (MOVWconst [c])) => (ANDconst [c] x)
+(OR x (MOVWconst [c])) => (ORconst [c] x)
+(XOR x (MOVWconst [c])) => (XORconst [c] x)
+(NOR x (MOVWconst [c])) => (NORconst [c] x)
+
+// NOTE(review): keep the unsigned uint32(c) guards from the untyped rules —
+// c is int32 under typed aux, and a plain `c >= 32` would let negative shift
+// counts fall through to SLLconst/SRLconst/SRAconst with out-of-range amounts.
+(SLL _ (MOVWconst [c])) && uint32(c)>=32 => (MOVWconst [0])
+(SRL _ (MOVWconst [c])) && uint32(c)>=32 => (MOVWconst [0])
+(SRA x (MOVWconst [c])) && uint32(c)>=32 => (SRAconst x [31])
+(SLL x (MOVWconst [c])) => (SLLconst x [c])
+(SRL x (MOVWconst [c])) => (SRLconst x [c])
+(SRA x (MOVWconst [c])) => (SRAconst x [c])
+
+(SGT (MOVWconst [c]) x) => (SGTconst [c] x)
+(SGTU (MOVWconst [c]) x) => (SGTUconst [c] x)
+(SGT x (MOVWconst [0])) => (SGTzero x)
+(SGTU x (MOVWconst [0])) => (SGTUzero x)
// mul with constant
-(Select1 (MULTU (MOVWconst [0]) _ )) -> (MOVWconst [0])
-(Select0 (MULTU (MOVWconst [0]) _ )) -> (MOVWconst [0])
-(Select1 (MULTU (MOVWconst [1]) x )) -> x
-(Select0 (MULTU (MOVWconst [1]) _ )) -> (MOVWconst [0])
-(Select1 (MULTU (MOVWconst [-1]) x )) -> (NEG <x.Type> x)
-(Select0 (MULTU (MOVWconst [-1]) x )) -> (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
-(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) -> (SLLconst [log2(int64(uint32(c)))] x)
-(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) -> (SRLconst [32-log2(int64(uint32(c)))] x)
-
-(MUL (MOVWconst [0]) _ ) -> (MOVWconst [0])
-(MUL (MOVWconst [1]) x ) -> x
-(MUL (MOVWconst [-1]) x ) -> (NEG x)
-(MUL (MOVWconst [c]) x ) && isPowerOfTwo(int64(uint32(c))) -> (SLLconst [log2(int64(uint32(c)))] x)
+// NOTE(review): Select0 of MULTU is the high word, Select1 the low word; the
+// log2uint32 helper replaces log2 so the power-of-two check stays 32-bit.
+(Select1 (MULTU (MOVWconst [0]) _ )) => (MOVWconst [0])
+(Select0 (MULTU (MOVWconst [0]) _ )) => (MOVWconst [0])
+(Select1 (MULTU (MOVWconst [1]) x )) => x
+(Select0 (MULTU (MOVWconst [1]) _ )) => (MOVWconst [0])
+(Select1 (MULTU (MOVWconst [-1]) x )) => (NEG <x.Type> x)
+(Select0 (MULTU (MOVWconst [-1]) x )) => (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
+(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
+(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) => (SRLconst [int32(32-log2uint32(int64(c)))] x)
+
+(MUL (MOVWconst [0]) _ ) => (MOVWconst [0])
+(MUL (MOVWconst [1]) x ) => x
+(MUL (MOVWconst [-1]) x ) => (NEG x)
+(MUL (MOVWconst [c]) x ) && isPowerOfTwo(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
// generic simplifications
-(ADD x (NEG y)) -> (SUB x y)
-(SUB x x) -> (MOVWconst [0])
-(SUB (MOVWconst [0]) x) -> (NEG x)
-(AND x x) -> x
-(OR x x) -> x
-(XOR x x) -> (MOVWconst [0])
+(ADD x (NEG y)) => (SUB x y)
+(SUB x x) => (MOVWconst [0])
+(SUB (MOVWconst [0]) x) => (NEG x)
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVWconst [0])
// miscellaneous patterns generated by dec64
-(AND (SGTUconst [1] x) (SGTUconst [1] y)) -> (SGTUconst [1] (OR <x.Type> x y))
-(OR (SGTUzero x) (SGTUzero y)) -> (SGTUzero (OR <x.Type> x y))
+(AND (SGTUconst [1] x) (SGTUconst [1] y)) => (SGTUconst [1] (OR <x.Type> x y))
+(OR (SGTUzero x) (SGTUzero y)) => (SGTUzero (OR <x.Type> x y))
// remove redundant *const ops
-(ADDconst [0] x) -> x
-(SUBconst [0] x) -> x
-(ANDconst [0] _) -> (MOVWconst [0])
-(ANDconst [-1] x) -> x
-(ORconst [0] x) -> x
-(ORconst [-1] _) -> (MOVWconst [-1])
-(XORconst [0] x) -> x
-(XORconst [-1] x) -> (NORconst [0] x)
+(ADDconst [0] x) => x
+(SUBconst [0] x) => x
+(ANDconst [0] _) => (MOVWconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVWconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (NORconst [0] x)
// generic constant folding
-(ADDconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(c+d))])
-(ADDconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(c+d))] x)
-(ADDconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(c-d))] x)
-(SUBconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d-c))])
-(SUBconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(-c-d))] x)
-(SUBconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(-c+d))] x)
-(SLLconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(uint32(d)<<uint32(c)))])
-(SRLconst [c] (MOVWconst [d])) -> (MOVWconst [int64(uint32(d)>>uint32(c))])
-(SRAconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d)>>uint32(c))])
-(MUL (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c)*int32(d))])
-(Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)*uint32(d)))])
-(Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [(c*d)>>32])
-(Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(c)/int32(d))])
-(Select1 (DIVU (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)/uint32(d)))])
-(Select0 (DIV (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(c)%int32(d))])
-(Select0 (DIVU (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)%uint32(d)))])
-(ANDconst [c] (MOVWconst [d])) -> (MOVWconst [c&d])
-(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
-(ORconst [c] (MOVWconst [d])) -> (MOVWconst [c|d])
-(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
-(XORconst [c] (MOVWconst [d])) -> (MOVWconst [c^d])
-(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
-(NORconst [c] (MOVWconst [d])) -> (MOVWconst [^(c|d)])
-(NEG (MOVWconst [c])) -> (MOVWconst [int64(int32(-c))])
-(MOVBreg (MOVWconst [c])) -> (MOVWconst [int64(int8(c))])
-(MOVBUreg (MOVWconst [c])) -> (MOVWconst [int64(uint8(c))])
-(MOVHreg (MOVWconst [c])) -> (MOVWconst [int64(int16(c))])
-(MOVHUreg (MOVWconst [c])) -> (MOVWconst [int64(uint16(c))])
-(MOVWreg (MOVWconst [c])) -> (MOVWconst [c])
+// NOTE(review): constants are int32 under typed aux, so the old
+// int64(int32(...)) wrap-to-32-bit dance is no longer needed.
+(ADDconst [c] (MOVWconst [d])) => (MOVWconst [int32(c+d)])
+(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
+(SUBconst [c] (MOVWconst [d])) => (MOVWconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
+(SLLconst [c] (MOVWconst [d])) => (MOVWconst [d<<uint32(c)])
+(SRLconst [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)>>uint32(c))])
+(SRAconst [c] (MOVWconst [d])) => (MOVWconst [d>>uint32(c)])
+(MUL (MOVWconst [c]) (MOVWconst [d])) => (MOVWconst [c*d])
+(Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32(uint32(c)*uint32(d))])
+(Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32((int64(uint32(c))*int64(uint32(d)))>>32)])
+(Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [c/d])
+(Select1 (DIVU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32(uint32(c)/uint32(d))])
+(Select0 (DIV (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [c%d])
+(Select0 (DIVU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVWconst [d])) => (MOVWconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVWconst [d])) => (MOVWconst [c|d])
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d])
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(NORconst [c] (MOVWconst [d])) => (MOVWconst [^(c|d)])
+(NEG (MOVWconst [c])) => (MOVWconst [-c])
+(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))])
+(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))])
+(MOVHreg (MOVWconst [c])) => (MOVWconst [int32(int16(c))])
+(MOVHUreg (MOVWconst [c])) => (MOVWconst [int32(uint16(c))])
+(MOVWreg (MOVWconst [c])) => (MOVWconst [c])
// constant comparisons
-(SGTconst [c] (MOVWconst [d])) && int32(c) > int32(d) -> (MOVWconst [1])
-(SGTconst [c] (MOVWconst [d])) && int32(c) <= int32(d) -> (MOVWconst [0])
-(SGTUconst [c] (MOVWconst [d])) && uint32(c)>uint32(d) -> (MOVWconst [1])
-(SGTUconst [c] (MOVWconst [d])) && uint32(c)<=uint32(d) -> (MOVWconst [0])
-(SGTzero (MOVWconst [d])) && int32(d) > 0 -> (MOVWconst [1])
-(SGTzero (MOVWconst [d])) && int32(d) <= 0 -> (MOVWconst [0])
-(SGTUzero (MOVWconst [d])) && uint32(d) != 0 -> (MOVWconst [1])
-(SGTUzero (MOVWconst [d])) && uint32(d) == 0 -> (MOVWconst [0])
+// NOTE(review): c and d are int32 under typed aux, so bare comparisons are
+// already 32-bit signed; unsigned checks keep explicit uint32 casts.
+(SGTconst [c] (MOVWconst [d])) && c > d => (MOVWconst [1])
+(SGTconst [c] (MOVWconst [d])) && c <= d => (MOVWconst [0])
+(SGTUconst [c] (MOVWconst [d])) && uint32(c) > uint32(d) => (MOVWconst [1])
+(SGTUconst [c] (MOVWconst [d])) && uint32(c) <= uint32(d) => (MOVWconst [0])
+(SGTzero (MOVWconst [d])) && d > 0 => (MOVWconst [1])
+(SGTzero (MOVWconst [d])) && d <= 0 => (MOVWconst [0])
+(SGTUzero (MOVWconst [d])) && d != 0 => (MOVWconst [1])
+(SGTUzero (MOVWconst [d])) && d == 0 => (MOVWconst [0])
// other known comparisons
-(SGTconst [c] (MOVBreg _)) && 0x7f < int32(c) -> (MOVWconst [1])
-(SGTconst [c] (MOVBreg _)) && int32(c) <= -0x80 -> (MOVWconst [0])
-(SGTconst [c] (MOVBUreg _)) && 0xff < int32(c) -> (MOVWconst [1])
-(SGTconst [c] (MOVBUreg _)) && int32(c) < 0 -> (MOVWconst [0])
-(SGTUconst [c] (MOVBUreg _)) && 0xff < uint32(c) -> (MOVWconst [1])
-(SGTconst [c] (MOVHreg _)) && 0x7fff < int32(c) -> (MOVWconst [1])
-(SGTconst [c] (MOVHreg _)) && int32(c) <= -0x8000 -> (MOVWconst [0])
-(SGTconst [c] (MOVHUreg _)) && 0xffff < int32(c) -> (MOVWconst [1])
-(SGTconst [c] (MOVHUreg _)) && int32(c) < 0 -> (MOVWconst [0])
-(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint32(c) -> (MOVWconst [1])
-(SGTconst [c] (ANDconst [m] _)) && 0 <= int32(m) && int32(m) < int32(c) -> (MOVWconst [1])
-(SGTUconst [c] (ANDconst [m] _)) && uint32(m) < uint32(c) -> (MOVWconst [1])
-(SGTconst [c] (SRLconst _ [d])) && 0 <= int32(c) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) -> (MOVWconst [1])
-(SGTUconst [c] (SRLconst _ [d])) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) -> (MOVWconst [1])
+(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVWconst [1])
+(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVWconst [0])
+(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVWconst [1])
+(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVWconst [0])
+(SGTUconst [c] (MOVBUreg _)) && 0xff < uint32(c) => (MOVWconst [1])
+(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVWconst [1])
+(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVWconst [0])
+(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVWconst [1])
+(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVWconst [0])
+(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint32(c) => (MOVWconst [1])
+(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVWconst [1])
+(SGTUconst [c] (ANDconst [m] _)) && uint32(m) < uint32(c) => (MOVWconst [1])
+(SGTconst [c] (SRLconst _ [d])) && 0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) => (MOVWconst [1])
+(SGTUconst [c] (SRLconst _ [d])) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) => (MOVWconst [1])
// absorb constants into branches
-(EQ (MOVWconst [0]) yes no) -> (First yes no)
-(EQ (MOVWconst [c]) yes no) && c != 0 -> (First no yes)
-(NE (MOVWconst [0]) yes no) -> (First no yes)
-(NE (MOVWconst [c]) yes no) && c != 0 -> (First yes no)
-(LTZ (MOVWconst [c]) yes no) && int32(c) < 0 -> (First yes no)
-(LTZ (MOVWconst [c]) yes no) && int32(c) >= 0 -> (First no yes)
-(LEZ (MOVWconst [c]) yes no) && int32(c) <= 0 -> (First yes no)
-(LEZ (MOVWconst [c]) yes no) && int32(c) > 0 -> (First no yes)
-(GTZ (MOVWconst [c]) yes no) && int32(c) > 0 -> (First yes no)
-(GTZ (MOVWconst [c]) yes no) && int32(c) <= 0 -> (First no yes)
-(GEZ (MOVWconst [c]) yes no) && int32(c) >= 0 -> (First yes no)
-(GEZ (MOVWconst [c]) yes no) && int32(c) < 0 -> (First no yes)
+(EQ (MOVWconst [0]) yes no) => (First yes no)
+(EQ (MOVWconst [c]) yes no) && c != 0 => (First no yes)
+(NE (MOVWconst [0]) yes no) => (First no yes)
+(NE (MOVWconst [c]) yes no) && c != 0 => (First yes no)
+(LTZ (MOVWconst [c]) yes no) && c < 0 => (First yes no)
+(LTZ (MOVWconst [c]) yes no) && c >= 0 => (First no yes)
+(LEZ (MOVWconst [c]) yes no) && c <= 0 => (First yes no)
+(LEZ (MOVWconst [c]) yes no) && c > 0 => (First no yes)
+(GTZ (MOVWconst [c]) yes no) && c > 0 => (First yes no)
+(GTZ (MOVWconst [c]) yes no) && c <= 0 => (First no yes)
+(GEZ (MOVWconst [c]) yes no) && c >= 0 => (First yes no)
+(GEZ (MOVWconst [c]) yes no) && c < 0 => (First no yes)
// conditional move
-(CMOVZ _ f (MOVWconst [0])) -> f
-(CMOVZ a _ (MOVWconst [c])) && c!=0 -> a
-(CMOVZzero _ (MOVWconst [0])) -> (MOVWconst [0])
-(CMOVZzero a (MOVWconst [c])) && c!=0 -> a
-(CMOVZ a (MOVWconst [0]) c) -> (CMOVZzero a c)
+(CMOVZ _ f (MOVWconst [0])) => f
+(CMOVZ a _ (MOVWconst [c])) && c!=0 => a
+(CMOVZzero _ (MOVWconst [0])) => (MOVWconst [0])
+(CMOVZzero a (MOVWconst [c])) && c!=0 => a
+(CMOVZ a (MOVWconst [0]) c) => (CMOVZzero a c)
// atomic
-(LoweredAtomicStore32 ptr (MOVWconst [0]) mem) -> (LoweredAtomicStorezero ptr mem)
-(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(c) -> (LoweredAtomicAddconst [c] ptr mem)
+(LoweredAtomicStore32 ptr (MOVWconst [0]) mem) => (LoweredAtomicStorezero ptr mem)
+(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(int64(c)) => (LoweredAtomicAddconst [c] ptr mem)
v.reset(OpMIPSLoweredAtomicAnd)
v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = ^3
+ v1.AuxInt = int32ToAuxInt(^3)
v0.AddArg2(v1, ptr)
v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32)
v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(val)
v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
- v5.AuxInt = 3
+ v5.AuxInt = int32ToAuxInt(3)
v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
- v6.AuxInt = 3
+ v6.AuxInt = int32ToAuxInt(3)
v6.AddArg(ptr)
v5.AddArg(v6)
v3.AddArg2(v4, v5)
v7 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
- v7.AuxInt = 0
+ v7.AuxInt = int32ToAuxInt(0)
v8 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v9.AuxInt = 0xff
+ v9.AuxInt = int32ToAuxInt(0xff)
v10 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
- v10.AuxInt = 3
+ v10.AuxInt = int32ToAuxInt(3)
v11 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
- v11.AuxInt = 3
+ v11.AuxInt = int32ToAuxInt(3)
v11.AddArg(ptr)
v10.AddArg(v11)
v8.AddArg2(v9, v10)
v.reset(OpMIPSLoweredAtomicAnd)
v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = ^3
+ v1.AuxInt = int32ToAuxInt(^3)
v0.AddArg2(v1, ptr)
v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32)
v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v4.AddArg(val)
v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
- v5.AuxInt = 3
+ v5.AuxInt = int32ToAuxInt(3)
v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
- v6.AuxInt = 3
+ v6.AuxInt = int32ToAuxInt(3)
v7 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
- v7.AuxInt = 3
+ v7.AuxInt = int32ToAuxInt(3)
v7.AddArg(ptr)
v6.AddArg(v7)
v5.AddArg(v6)
v3.AddArg2(v4, v5)
v8 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
- v8.AuxInt = 0
+ v8.AuxInt = int32ToAuxInt(0)
v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v10.AuxInt = 0xff
+ v10.AuxInt = int32ToAuxInt(0xff)
v11 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
- v11.AuxInt = 3
+ v11.AuxInt = int32ToAuxInt(3)
v12 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
- v12.AuxInt = 3
+ v12.AuxInt = int32ToAuxInt(3)
v13 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
- v13.AuxInt = 3
+ v13.AuxInt = int32ToAuxInt(3)
v13.AddArg(ptr)
v12.AddArg(v13)
v11.AddArg(v12)
v.reset(OpMIPSLoweredAtomicOr)
v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = ^3
+ v1.AuxInt = int32ToAuxInt(^3)
v0.AddArg2(v1, ptr)
v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v3.AddArg(val)
v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
- v4.AuxInt = 3
+ v4.AuxInt = int32ToAuxInt(3)
v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
- v5.AuxInt = 3
+ v5.AuxInt = int32ToAuxInt(3)
v5.AddArg(ptr)
v4.AddArg(v5)
v2.AddArg2(v3, v4)
v.reset(OpMIPSLoweredAtomicOr)
v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = ^3
+ v1.AuxInt = int32ToAuxInt(^3)
v0.AddArg2(v1, ptr)
v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v3.AddArg(val)
v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
- v4.AuxInt = 3
+ v4.AuxInt = int32ToAuxInt(3)
v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
- v5.AuxInt = 3
+ v5.AuxInt = int32ToAuxInt(3)
v6 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
- v6.AuxInt = 3
+ v6.AuxInt = int32ToAuxInt(3)
v6.AddArg(ptr)
v5.AddArg(v6)
v4.AddArg(v5)
ptr := v_0
v.reset(OpMIPSSGTU)
v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v0.AuxInt = 0
+ v0.AuxInt = int32ToAuxInt(0)
v.AddArg2(ptr, v0)
return true
}
idx := v_0
len := v_1
v.reset(OpMIPSXORconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
v0.AddArg2(idx, len)
v.AddArg(v0)
if v_1.Op != OpMIPSMOVWconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpMIPSADDconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
// result: (MOVWaddr [off1+off2] {sym} ptr)
for {
- off1 := v.AuxInt
+ off1 := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
v.reset(OpMIPSMOVWaddr)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg(ptr)
return true
}
// match: (ADDconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
x := v_0
return true
}
// match: (ADDconst [c] (MOVWconst [d]))
- // result: (MOVWconst [int64(int32(c+d))])
+ // result: (MOVWconst [int32(c+d)])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int32(c + d))
+ v.AuxInt = int32ToAuxInt(int32(c + d))
return true
}
// match: (ADDconst [c] (ADDconst [d] x))
- // result: (ADDconst [int64(int32(c+d))] x)
+ // result: (ADDconst [c+d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSADDconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpMIPSADDconst)
- v.AuxInt = int64(int32(c + d))
+ v.AuxInt = int32ToAuxInt(c + d)
v.AddArg(x)
return true
}
// match: (ADDconst [c] (SUBconst [d] x))
- // result: (ADDconst [int64(int32(c-d))] x)
+ // result: (ADDconst [c-d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSSUBconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpMIPSADDconst)
- v.AuxInt = int64(int32(c - d))
+ v.AuxInt = int32ToAuxInt(c - d)
v.AddArg(x)
return true
}
if v_1.Op != OpMIPSMOVWconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpMIPSANDconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
// result: (SGTUconst [1] (OR <x.Type> x y))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpMIPSSGTUconst || v_0.AuxInt != 1 {
+ if v_0.Op != OpMIPSSGTUconst || auxIntToInt32(v_0.AuxInt) != 1 {
continue
}
x := v_0.Args[0]
- if v_1.Op != OpMIPSSGTUconst || v_1.AuxInt != 1 {
+ if v_1.Op != OpMIPSSGTUconst || auxIntToInt32(v_1.AuxInt) != 1 {
continue
}
y := v_1.Args[0]
v.reset(OpMIPSSGTUconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type)
v0.AddArg2(x, y)
v.AddArg(v0)
// match: (ANDconst [0] _)
// result: (MOVWconst [0])
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (ANDconst [-1] x)
// result: x
for {
- if v.AuxInt != -1 {
+ if auxIntToInt32(v.AuxInt) != -1 {
break
}
x := v_0
// match: (ANDconst [c] (MOVWconst [d]))
// result: (MOVWconst [c&d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = c & d
+ v.AuxInt = int32ToAuxInt(c & d)
return true
}
// match: (ANDconst [c] (ANDconst [d] x))
// result: (ANDconst [c&d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSANDconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpMIPSANDconst)
- v.AuxInt = c & d
+ v.AuxInt = int32ToAuxInt(c & d)
v.AddArg(x)
return true
}
// result: f
for {
f := v_1
- if v_2.Op != OpMIPSMOVWconst || v_2.AuxInt != 0 {
+ if v_2.Op != OpMIPSMOVWconst || auxIntToInt32(v_2.AuxInt) != 0 {
break
}
v.copyOf(f)
if v_2.Op != OpMIPSMOVWconst {
break
}
- c := v_2.AuxInt
+ c := auxIntToInt32(v_2.AuxInt)
if !(c != 0) {
break
}
// result: (CMOVZzero a c)
for {
a := v_0
- if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
c := v_2
// match: (CMOVZzero _ (MOVWconst [0]))
// result: (MOVWconst [0])
for {
- if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (CMOVZzero a (MOVWconst [c]))
if v_1.Op != OpMIPSMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
if !(c != 0) {
break
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (LoweredAtomicAdd ptr (MOVWconst [c]) mem)
- // cond: is16Bit(c)
+ // cond: is16Bit(int64(c))
// result: (LoweredAtomicAddconst [c] ptr mem)
for {
ptr := v_0
if v_1.Op != OpMIPSMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
mem := v_2
- if !(is16Bit(c)) {
+ if !(is16Bit(int64(c))) {
break
}
v.reset(OpMIPSLoweredAtomicAddconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg2(ptr, mem)
return true
}
// result: (LoweredAtomicStorezero ptr mem)
for {
ptr := v_0
- if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
mem := v_2
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVBUload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVBUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpMIPSMOVBUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVBUreg x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVBstore {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
if x.Op != OpMIPSMOVBload {
break
}
- off := x.AuxInt
- sym := x.Aux
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
mem := x.Args[1]
ptr := x.Args[0]
if !(x.Uses == 1 && clobber(x)) {
b = x.Block
v0 := b.NewValue0(x.Pos, OpMIPSMOVBUload, t)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
if v_0.Op != OpMIPSANDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpMIPSANDconst)
- v.AuxInt = c & 0xff
+ v.AuxInt = int32ToAuxInt(c & 0xff)
v.AddArg(x)
return true
}
// match: (MOVBUreg (MOVWconst [c]))
- // result: (MOVWconst [int64(uint8(c))])
+ // result: (MOVWconst [int32(uint8(c))])
for {
if v_0.Op != OpMIPSMOVWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(uint8(c))
+ v.AuxInt = int32ToAuxInt(int32(uint8(c)))
return true
}
return false
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVBload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVBload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpMIPSMOVBload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVBreg x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVBstore {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
if x.Op != OpMIPSMOVBUload {
break
}
- off := x.AuxInt
- sym := x.Aux
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
mem := x.Args[1]
ptr := x.Args[0]
if !(x.Uses == 1 && clobber(x)) {
b = x.Block
v0 := b.NewValue0(x.Pos, OpMIPSMOVBload, t)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
if v_0.Op != OpMIPSANDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if !(c&0x80 == 0) {
break
}
v.reset(OpMIPSANDconst)
- v.AuxInt = c & 0x7f
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
v.AddArg(x)
return true
}
// match: (MOVBreg (MOVWconst [c]))
- // result: (MOVWconst [int64(int8(c))])
+ // result: (MOVWconst [int32(int8(c))])
for {
if v_0.Op != OpMIPSMOVWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int8(c))
+ v.AuxInt = int32ToAuxInt(int32(int8(c)))
return true
}
return false
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVBstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
val := v_1
mem := v_2
break
}
v.reset(OpMIPSMOVBstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVWconst [0]) mem)
// result: (MOVBstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPSMOVBstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVBreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPSMOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVBUreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPSMOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVHreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPSMOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVHUreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPSMOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
// result: (MOVBstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVWreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPSMOVBstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVBstorezero [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVBstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpMIPSMOVBstorezero)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVDload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVDload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpMIPSMOVDload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVDstore {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVDstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // result: (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
val := v_1
mem := v_2
break
}
v.reset(OpMIPSMOVDstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVFload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVFload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVFload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpMIPSMOVFload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVFstore {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVFstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVFstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // result: (MOVFstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
val := v_1
mem := v_2
break
}
v.reset(OpMIPSMOVFstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVHUload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVHUload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpMIPSMOVHUload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVHUreg x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVHstore {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
if x.Op != OpMIPSMOVHload {
break
}
- off := x.AuxInt
- sym := x.Aux
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
mem := x.Args[1]
ptr := x.Args[0]
if !(x.Uses == 1 && clobber(x)) {
b = x.Block
v0 := b.NewValue0(x.Pos, OpMIPSMOVHUload, t)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
if v_0.Op != OpMIPSANDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpMIPSANDconst)
- v.AuxInt = c & 0xffff
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
v.AddArg(x)
return true
}
// match: (MOVHUreg (MOVWconst [c]))
- // result: (MOVWconst [int64(uint16(c))])
+ // result: (MOVWconst [int32(uint16(c))])
for {
if v_0.Op != OpMIPSMOVWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(uint16(c))
+ v.AuxInt = int32ToAuxInt(int32(uint16(c)))
return true
}
return false
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVHload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVHload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpMIPSMOVHload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVHreg x)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVHstore {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
if x.Op != OpMIPSMOVHUload {
break
}
- off := x.AuxInt
- sym := x.Aux
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
mem := x.Args[1]
ptr := x.Args[0]
if !(x.Uses == 1 && clobber(x)) {
b = x.Block
v0 := b.NewValue0(x.Pos, OpMIPSMOVHload, t)
v.copyOf(v0)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
if v_0.Op != OpMIPSANDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
if !(c&0x8000 == 0) {
break
}
v.reset(OpMIPSANDconst)
- v.AuxInt = c & 0x7fff
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
v.AddArg(x)
return true
}
// match: (MOVHreg (MOVWconst [c]))
- // result: (MOVWconst [int64(int16(c))])
+ // result: (MOVWconst [int32(int16(c))])
for {
if v_0.Op != OpMIPSMOVWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int16(c))
+ v.AuxInt = int32ToAuxInt(int32(int16(c)))
return true
}
return false
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVHstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // result: (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
val := v_1
mem := v_2
break
}
v.reset(OpMIPSMOVHstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVWconst [0]) mem)
// result: (MOVHstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPSMOVHstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVHreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPSMOVHstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVHUreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPSMOVHstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
// match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
// result: (MOVHstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVWreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPSMOVHstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVHstorezero [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVHstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpMIPSMOVHstorezero)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVWload [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVWload)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpMIPSMOVWload)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVWstore {
break
}
- off2 := v_1.AuxInt
- sym2 := v_1.Aux
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
x := v_1.Args[1]
ptr2 := v_1.Args[0]
if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
if v_0.Op != OpMIPSMOVWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
return true
}
return false
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVWstore [off1+off2] {sym} ptr val mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
val := v_1
mem := v_2
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ // result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
val := v_1
mem := v_2
break
}
v.reset(OpMIPSMOVWstore)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg3(ptr, val, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem)
// result: (MOVWstorezero [off] {sym} ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
- if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
mem := v_2
v.reset(OpMIPSMOVWstorezero)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
// result: (MOVWstore [off] {sym} ptr x mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpMIPSMOVWreg {
break
x := v_1.Args[0]
mem := v_2
v.reset(OpMIPSMOVWstore)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
- // cond: (is16Bit(off1+off2) || x.Uses == 1)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
// result: (MOVWstorezero [off1+off2] {sym} ptr mem)
for {
- off1 := v.AuxInt
- sym := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
x := v_0
if x.Op != OpMIPSADDconst {
break
}
- off2 := x.AuxInt
+ off2 := auxIntToInt32(x.AuxInt)
ptr := x.Args[0]
mem := v_1
- if !(is16Bit(off1+off2) || x.Uses == 1) {
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
break
}
v.reset(OpMIPSMOVWstorezero)
- v.AuxInt = off1 + off2
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
// match: (MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
- // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ // result: (MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
if v_0.Op != OpMIPSMOVWaddr {
break
}
- off2 := v_0.AuxInt
- sym2 := v_0.Aux
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
ptr := v_0.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2)) {
break
}
v.reset(OpMIPSMOVWstorezero)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(ptr, mem)
return true
}
// result: (MOVWconst [0])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != 0 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
continue
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
break
// result: x
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != 1 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 {
continue
}
x := v_1
// result: (NEG x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != -1 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != -1 {
continue
}
x := v_1
}
// match: (MUL (MOVWconst [c]) x )
// cond: isPowerOfTwo(int64(uint32(c)))
- // result: (SLLconst [log2(int64(uint32(c)))] x)
+ // result: (SLLconst [int32(log2uint32(int64(c)))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpMIPSMOVWconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_1
if !(isPowerOfTwo(int64(uint32(c)))) {
continue
}
v.reset(OpMIPSSLLconst)
- v.AuxInt = log2(int64(uint32(c)))
+ v.AuxInt = int32ToAuxInt(int32(log2uint32(int64(c))))
v.AddArg(x)
return true
}
break
}
// match: (MUL (MOVWconst [c]) (MOVWconst [d]))
- // result: (MOVWconst [int64(int32(c)*int32(d))])
+ // result: (MOVWconst [c*d])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpMIPSMOVWconst {
continue
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
if v_1.Op != OpMIPSMOVWconst {
continue
}
- d := v_1.AuxInt
+ d := auxIntToInt32(v_1.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int32(c) * int32(d))
+ v.AuxInt = int32ToAuxInt(c * d)
return true
}
break
func rewriteValueMIPS_OpMIPSNEG(v *Value) bool {
v_0 := v.Args[0]
// match: (NEG (MOVWconst [c]))
- // result: (MOVWconst [int64(int32(-c))])
+ // result: (MOVWconst [-c])
for {
if v_0.Op != OpMIPSMOVWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int32(-c))
+ v.AuxInt = int32ToAuxInt(-c)
return true
}
return false
if v_1.Op != OpMIPSMOVWconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpMIPSNORconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (NORconst [c] (MOVWconst [d]))
// result: (MOVWconst [^(c|d)])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = ^(c | d)
+ v.AuxInt = int32ToAuxInt(^(c | d))
return true
}
return false
if v_1.Op != OpMIPSMOVWconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpMIPSORconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (ORconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
x := v_0
// match: (ORconst [-1] _)
// result: (MOVWconst [-1])
for {
- if v.AuxInt != -1 {
+ if auxIntToInt32(v.AuxInt) != -1 {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = -1
+ v.AuxInt = int32ToAuxInt(-1)
return true
}
// match: (ORconst [c] (MOVWconst [d]))
// result: (MOVWconst [c|d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = c | d
+ v.AuxInt = int32ToAuxInt(c | d)
return true
}
// match: (ORconst [c] (ORconst [d] x))
// result: (ORconst [c|d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSORconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpMIPSORconst)
- v.AuxInt = c | d
+ v.AuxInt = int32ToAuxInt(c | d)
v.AddArg(x)
return true
}
if v_0.Op != OpMIPSMOVWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_1
v.reset(OpMIPSSGTconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
// result: (SGTzero x)
for {
x := v_0
- if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
v.reset(OpMIPSSGTzero)
if v_0.Op != OpMIPSMOVWconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
x := v_1
v.reset(OpMIPSSGTUconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
// result: (SGTUzero x)
for {
x := v_0
- if v_1.Op != OpMIPSMOVWconst || v_1.AuxInt != 0 {
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
break
}
v.reset(OpMIPSSGTUzero)
func rewriteValueMIPS_OpMIPSSGTUconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SGTUconst [c] (MOVWconst [d]))
- // cond: uint32(c)>uint32(d)
+ // cond: uint32(c) > uint32(d)
// result: (MOVWconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
if !(uint32(c) > uint32(d)) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
// match: (SGTUconst [c] (MOVWconst [d]))
- // cond: uint32(c)<=uint32(d)
+ // cond: uint32(c) <= uint32(d)
// result: (MOVWconst [0])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
if !(uint32(c) <= uint32(d)) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (SGTUconst [c] (MOVBUreg _))
// cond: 0xff < uint32(c)
// result: (MOVWconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVBUreg || !(0xff < uint32(c)) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
// match: (SGTUconst [c] (MOVHUreg _))
// cond: 0xffff < uint32(c)
// result: (MOVWconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVHUreg || !(0xffff < uint32(c)) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
// match: (SGTUconst [c] (ANDconst [m] _))
// cond: uint32(m) < uint32(c)
// result: (MOVWconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSANDconst {
break
}
- m := v_0.AuxInt
+ m := auxIntToInt32(v_0.AuxInt)
if !(uint32(m) < uint32(c)) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
// match: (SGTUconst [c] (SRLconst _ [d]))
// cond: uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)
// result: (MOVWconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSSRLconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
if !(uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
return false
func rewriteValueMIPS_OpMIPSSGTUzero(v *Value) bool {
v_0 := v.Args[0]
// match: (SGTUzero (MOVWconst [d]))
- // cond: uint32(d) != 0
+ // cond: d != 0
// result: (MOVWconst [1])
for {
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
- if !(uint32(d) != 0) {
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d != 0) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
// match: (SGTUzero (MOVWconst [d]))
- // cond: uint32(d) == 0
+ // cond: d == 0
// result: (MOVWconst [0])
for {
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
- if !(uint32(d) == 0) {
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d == 0) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
func rewriteValueMIPS_OpMIPSSGTconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SGTconst [c] (MOVWconst [d]))
- // cond: int32(c) > int32(d)
+ // cond: c > d
// result: (MOVWconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
- if !(int32(c) > int32(d)) {
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(c > d) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
// match: (SGTconst [c] (MOVWconst [d]))
- // cond: int32(c) <= int32(d)
+ // cond: c <= d
// result: (MOVWconst [0])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
- if !(int32(c) <= int32(d)) {
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(c <= d) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (SGTconst [c] (MOVBreg _))
- // cond: 0x7f < int32(c)
+ // cond: 0x7f < c
// result: (MOVWconst [1])
for {
- c := v.AuxInt
- if v_0.Op != OpMIPSMOVBreg || !(0x7f < int32(c)) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBreg || !(0x7f < c) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
// match: (SGTconst [c] (MOVBreg _))
- // cond: int32(c) <= -0x80
+ // cond: c <= -0x80
// result: (MOVWconst [0])
for {
- c := v.AuxInt
- if v_0.Op != OpMIPSMOVBreg || !(int32(c) <= -0x80) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBreg || !(c <= -0x80) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (SGTconst [c] (MOVBUreg _))
- // cond: 0xff < int32(c)
+ // cond: 0xff < c
// result: (MOVWconst [1])
for {
- c := v.AuxInt
- if v_0.Op != OpMIPSMOVBUreg || !(0xff < int32(c)) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBUreg || !(0xff < c) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
// match: (SGTconst [c] (MOVBUreg _))
- // cond: int32(c) < 0
+ // cond: c < 0
// result: (MOVWconst [0])
for {
- c := v.AuxInt
- if v_0.Op != OpMIPSMOVBUreg || !(int32(c) < 0) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBUreg || !(c < 0) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (SGTconst [c] (MOVHreg _))
- // cond: 0x7fff < int32(c)
+ // cond: 0x7fff < c
// result: (MOVWconst [1])
for {
- c := v.AuxInt
- if v_0.Op != OpMIPSMOVHreg || !(0x7fff < int32(c)) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHreg || !(0x7fff < c) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
// match: (SGTconst [c] (MOVHreg _))
- // cond: int32(c) <= -0x8000
+ // cond: c <= -0x8000
// result: (MOVWconst [0])
for {
- c := v.AuxInt
- if v_0.Op != OpMIPSMOVHreg || !(int32(c) <= -0x8000) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHreg || !(c <= -0x8000) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (SGTconst [c] (MOVHUreg _))
- // cond: 0xffff < int32(c)
+ // cond: 0xffff < c
// result: (MOVWconst [1])
for {
- c := v.AuxInt
- if v_0.Op != OpMIPSMOVHUreg || !(0xffff < int32(c)) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHUreg || !(0xffff < c) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
// match: (SGTconst [c] (MOVHUreg _))
- // cond: int32(c) < 0
+ // cond: c < 0
// result: (MOVWconst [0])
for {
- c := v.AuxInt
- if v_0.Op != OpMIPSMOVHUreg || !(int32(c) < 0) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHUreg || !(c < 0) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (SGTconst [c] (ANDconst [m] _))
- // cond: 0 <= int32(m) && int32(m) < int32(c)
+ // cond: 0 <= m && m < c
// result: (MOVWconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSANDconst {
break
}
- m := v_0.AuxInt
- if !(0 <= int32(m) && int32(m) < int32(c)) {
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < c) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
// match: (SGTconst [c] (SRLconst _ [d]))
- // cond: 0 <= int32(c) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)
+ // cond: 0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)
// result: (MOVWconst [1])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSSRLconst {
break
}
- d := v_0.AuxInt
- if !(0 <= int32(c) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) {
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
return false
func rewriteValueMIPS_OpMIPSSGTzero(v *Value) bool {
v_0 := v.Args[0]
// match: (SGTzero (MOVWconst [d]))
- // cond: int32(d) > 0
+ // cond: d > 0
// result: (MOVWconst [1])
for {
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
- if !(int32(d) > 0) {
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d > 0) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 1
+ v.AuxInt = int32ToAuxInt(1)
return true
}
// match: (SGTzero (MOVWconst [d]))
- // cond: int32(d) <= 0
+ // cond: d <= 0
// result: (MOVWconst [0])
for {
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
- if !(int32(d) <= 0) {
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d <= 0) {
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
func rewriteValueMIPS_OpMIPSSLL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (SLL _ (MOVWconst [c]))
- // cond: uint32(c)>=32
- // result: (MOVWconst [0])
- for {
- if v_1.Op != OpMIPSMOVWconst {
- break
- }
- c := v_1.AuxInt
- if !(uint32(c) >= 32) {
- break
- }
- v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
- return true
- }
// match: (SLL x (MOVWconst [c]))
// result: (SLLconst x [c])
for {
if v_1.Op != OpMIPSMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpMIPSSLLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
func rewriteValueMIPS_OpMIPSSLLconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SLLconst [c] (MOVWconst [d]))
- // result: (MOVWconst [int64(int32(uint32(d)<<uint32(c)))])
+ // result: (MOVWconst [d<<uint32(c)])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int32(uint32(d) << uint32(c)))
+ v.AuxInt = int32ToAuxInt(d << uint32(c))
return true
}
return false
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SRA x (MOVWconst [c]))
- // cond: uint32(c)>=32
+ // cond: c >= 32
// result: (SRAconst x [31])
for {
x := v_0
if v_1.Op != OpMIPSMOVWconst {
break
}
- c := v_1.AuxInt
- if !(uint32(c) >= 32) {
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c >= 32) {
break
}
v.reset(OpMIPSSRAconst)
- v.AuxInt = 31
+ v.AuxInt = int32ToAuxInt(31)
v.AddArg(x)
return true
}
if v_1.Op != OpMIPSMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpMIPSSRAconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
func rewriteValueMIPS_OpMIPSSRAconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SRAconst [c] (MOVWconst [d]))
- // result: (MOVWconst [int64(int32(d)>>uint32(c))])
+ // result: (MOVWconst [d>>uint32(c)])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int32(d) >> uint32(c))
+ v.AuxInt = int32ToAuxInt(d >> uint32(c))
return true
}
return false
func rewriteValueMIPS_OpMIPSSRL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (SRL _ (MOVWconst [c]))
- // cond: uint32(c)>=32
- // result: (MOVWconst [0])
- for {
- if v_1.Op != OpMIPSMOVWconst {
- break
- }
- c := v_1.AuxInt
- if !(uint32(c) >= 32) {
- break
- }
- v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
- return true
- }
// match: (SRL x (MOVWconst [c]))
// result: (SRLconst x [c])
for {
if v_1.Op != OpMIPSMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpMIPSSRLconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
func rewriteValueMIPS_OpMIPSSRLconst(v *Value) bool {
v_0 := v.Args[0]
// match: (SRLconst [c] (MOVWconst [d]))
- // result: (MOVWconst [int64(uint32(d)>>uint32(c))])
+ // result: (MOVWconst [int32(uint32(d)>>uint32(c))])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(uint32(d) >> uint32(c))
+ v.AuxInt = int32ToAuxInt(int32(uint32(d) >> uint32(c)))
return true
}
return false
if v_1.Op != OpMIPSMOVWconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpMIPSSUBconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (SUB (MOVWconst [0]) x)
// result: (NEG x)
for {
- if v_0.Op != OpMIPSMOVWconst || v_0.AuxInt != 0 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
break
}
x := v_1
// match: (SUBconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
x := v_0
return true
}
// match: (SUBconst [c] (MOVWconst [d]))
- // result: (MOVWconst [int64(int32(d-c))])
+ // result: (MOVWconst [d-c])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int32(d - c))
+ v.AuxInt = int32ToAuxInt(d - c)
return true
}
// match: (SUBconst [c] (SUBconst [d] x))
- // result: (ADDconst [int64(int32(-c-d))] x)
+ // result: (ADDconst [-c-d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSSUBconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpMIPSADDconst)
- v.AuxInt = int64(int32(-c - d))
+ v.AuxInt = int32ToAuxInt(-c - d)
v.AddArg(x)
return true
}
// match: (SUBconst [c] (ADDconst [d] x))
- // result: (ADDconst [int64(int32(-c+d))] x)
+ // result: (ADDconst [-c+d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSADDconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpMIPSADDconst)
- v.AuxInt = int64(int32(-c + d))
+ v.AuxInt = int32ToAuxInt(-c + d)
v.AddArg(x)
return true
}
if v_1.Op != OpMIPSMOVWconst {
continue
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
v.reset(OpMIPSXORconst)
- v.AuxInt = c
+ v.AuxInt = int32ToAuxInt(c)
v.AddArg(x)
return true
}
break
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
return false
// match: (XORconst [0] x)
// result: x
for {
- if v.AuxInt != 0 {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
x := v_0
// match: (XORconst [-1] x)
// result: (NORconst [0] x)
for {
- if v.AuxInt != -1 {
+ if auxIntToInt32(v.AuxInt) != -1 {
break
}
x := v_0
v.reset(OpMIPSNORconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
v.AddArg(x)
return true
}
// match: (XORconst [c] (MOVWconst [d]))
// result: (MOVWconst [c^d])
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSMOVWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = c ^ d
+ v.AuxInt = int32ToAuxInt(c ^ d)
return true
}
// match: (XORconst [c] (XORconst [d] x))
// result: (XORconst [c^d] x)
for {
- c := v.AuxInt
+ c := auxIntToInt32(v.AuxInt)
if v_0.Op != OpMIPSXORconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpMIPSXORconst)
- v.AuxInt = c ^ d
+ v.AuxInt = int32ToAuxInt(c ^ d)
v.AddArg(x)
return true
}
// cond: boundsABI(kind) == 0
// result: (LoweredPanicBoundsA [kind] x y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
mem := v_2
break
}
v.reset(OpMIPSLoweredPanicBoundsA)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg3(x, y, mem)
return true
}
// cond: boundsABI(kind) == 1
// result: (LoweredPanicBoundsB [kind] x y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
mem := v_2
break
}
v.reset(OpMIPSLoweredPanicBoundsB)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg3(x, y, mem)
return true
}
// cond: boundsABI(kind) == 2
// result: (LoweredPanicBoundsC [kind] x y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
mem := v_2
break
}
v.reset(OpMIPSLoweredPanicBoundsC)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg3(x, y, mem)
return true
}
// cond: boundsABI(kind) == 0
// result: (LoweredPanicExtendA [kind] hi lo y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
hi := v_0
lo := v_1
y := v_2
break
}
v.reset(OpMIPSLoweredPanicExtendA)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg4(hi, lo, y, mem)
return true
}
// cond: boundsABI(kind) == 1
// result: (LoweredPanicExtendB [kind] hi lo y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
hi := v_0
lo := v_1
y := v_2
break
}
v.reset(OpMIPSLoweredPanicExtendB)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg4(hi, lo, y, mem)
return true
}
// cond: boundsABI(kind) == 2
// result: (LoweredPanicExtendC [kind] hi lo y mem)
for {
- kind := v.AuxInt
+ kind := auxIntToInt64(v.AuxInt)
hi := v_0
lo := v_1
y := v_2
break
}
v.reset(OpMIPSLoweredPanicExtendC)
- v.AuxInt = kind
+ v.AuxInt = int64ToAuxInt(kind)
v.AddArg4(hi, lo, y, mem)
return true
}
v_0_0 := v_0.Args[0]
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 0 {
continue
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
break
v_0_0 := v_0.Args[0]
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 1 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
continue
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
break
v_0_0 := v_0.Args[0]
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != -1 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != -1 {
continue
}
x := v_0_1
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type)
- v0.AuxInt = -1
+ v0.AuxInt = int32ToAuxInt(-1)
v0.AddArg(x)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v1.AuxInt = 0
+ v1.AuxInt = int32ToAuxInt(0)
v.AddArg3(v0, v1, x)
return true
}
}
// match: (Select0 (MULTU (MOVWconst [c]) x ))
// cond: isPowerOfTwo(int64(uint32(c)))
- // result: (SRLconst [32-log2(int64(uint32(c)))] x)
+ // result: (SRLconst [int32(32-log2uint32(int64(c)))] x)
for {
if v_0.Op != OpMIPSMULTU {
break
if v_0_0.Op != OpMIPSMOVWconst {
continue
}
- c := v_0_0.AuxInt
+ c := auxIntToInt32(v_0_0.AuxInt)
x := v_0_1
if !(isPowerOfTwo(int64(uint32(c)))) {
continue
}
v.reset(OpMIPSSRLconst)
- v.AuxInt = 32 - log2(int64(uint32(c)))
+ v.AuxInt = int32ToAuxInt(int32(32 - log2uint32(int64(c))))
v.AddArg(x)
return true
}
break
}
// match: (Select0 (MULTU (MOVWconst [c]) (MOVWconst [d])))
- // result: (MOVWconst [(c*d)>>32])
+ // result: (MOVWconst [int32((int64(uint32(c))*int64(uint32(d)))>>32)])
for {
if v_0.Op != OpMIPSMULTU {
break
if v_0_0.Op != OpMIPSMOVWconst {
continue
}
- c := v_0_0.AuxInt
+ c := auxIntToInt32(v_0_0.AuxInt)
if v_0_1.Op != OpMIPSMOVWconst {
continue
}
- d := v_0_1.AuxInt
+ d := auxIntToInt32(v_0_1.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = (c * d) >> 32
+ v.AuxInt = int32ToAuxInt(int32((int64(uint32(c)) * int64(uint32(d))) >> 32))
return true
}
break
}
// match: (Select0 (DIV (MOVWconst [c]) (MOVWconst [d])))
- // result: (MOVWconst [int64(int32(c)%int32(d))])
+ // result: (MOVWconst [c%d])
for {
if v_0.Op != OpMIPSDIV {
break
if v_0_0.Op != OpMIPSMOVWconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt32(v_0_0.AuxInt)
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPSMOVWconst {
break
}
- d := v_0_1.AuxInt
+ d := auxIntToInt32(v_0_1.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int32(c) % int32(d))
+ v.AuxInt = int32ToAuxInt(c % d)
return true
}
// match: (Select0 (DIVU (MOVWconst [c]) (MOVWconst [d])))
- // result: (MOVWconst [int64(int32(uint32(c)%uint32(d)))])
+ // result: (MOVWconst [int32(uint32(c)%uint32(d))])
for {
if v_0.Op != OpMIPSDIVU {
break
if v_0_0.Op != OpMIPSMOVWconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt32(v_0_0.AuxInt)
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPSMOVWconst {
break
}
- d := v_0_1.AuxInt
+ d := auxIntToInt32(v_0_1.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int32(uint32(c) % uint32(d)))
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d)))
return true
}
return false
v_0_0 := v_0.Args[0]
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 0 {
continue
}
v.reset(OpMIPSMOVWconst)
- v.AuxInt = 0
+ v.AuxInt = int32ToAuxInt(0)
return true
}
break
v_0_0 := v_0.Args[0]
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != 1 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
continue
}
x := v_0_1
v_0_0 := v_0.Args[0]
v_0_1 := v_0.Args[1]
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpMIPSMOVWconst || v_0_0.AuxInt != -1 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != -1 {
continue
}
x := v_0_1
}
// match: (Select1 (MULTU (MOVWconst [c]) x ))
// cond: isPowerOfTwo(int64(uint32(c)))
- // result: (SLLconst [log2(int64(uint32(c)))] x)
+ // result: (SLLconst [int32(log2uint32(int64(c)))] x)
for {
if v_0.Op != OpMIPSMULTU {
break
if v_0_0.Op != OpMIPSMOVWconst {
continue
}
- c := v_0_0.AuxInt
+ c := auxIntToInt32(v_0_0.AuxInt)
x := v_0_1
if !(isPowerOfTwo(int64(uint32(c)))) {
continue
}
v.reset(OpMIPSSLLconst)
- v.AuxInt = log2(int64(uint32(c)))
+ v.AuxInt = int32ToAuxInt(int32(log2uint32(int64(c))))
v.AddArg(x)
return true
}
break
}
// match: (Select1 (MULTU (MOVWconst [c]) (MOVWconst [d])))
- // result: (MOVWconst [int64(int32(uint32(c)*uint32(d)))])
+ // result: (MOVWconst [int32(uint32(c)*uint32(d))])
for {
if v_0.Op != OpMIPSMULTU {
break
if v_0_0.Op != OpMIPSMOVWconst {
continue
}
- c := v_0_0.AuxInt
+ c := auxIntToInt32(v_0_0.AuxInt)
if v_0_1.Op != OpMIPSMOVWconst {
continue
}
- d := v_0_1.AuxInt
+ d := auxIntToInt32(v_0_1.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int32(uint32(c) * uint32(d)))
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) * uint32(d)))
return true
}
break
}
// match: (Select1 (DIV (MOVWconst [c]) (MOVWconst [d])))
- // result: (MOVWconst [int64(int32(c)/int32(d))])
+ // result: (MOVWconst [c/d])
for {
if v_0.Op != OpMIPSDIV {
break
if v_0_0.Op != OpMIPSMOVWconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt32(v_0_0.AuxInt)
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPSMOVWconst {
break
}
- d := v_0_1.AuxInt
+ d := auxIntToInt32(v_0_1.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int32(c) / int32(d))
+ v.AuxInt = int32ToAuxInt(c / d)
return true
}
// match: (Select1 (DIVU (MOVWconst [c]) (MOVWconst [d])))
- // result: (MOVWconst [int64(int32(uint32(c)/uint32(d)))])
+ // result: (MOVWconst [int32(uint32(c)/uint32(d))])
for {
if v_0.Op != OpMIPSDIVU {
break
if v_0_0.Op != OpMIPSMOVWconst {
break
}
- c := v_0_0.AuxInt
+ c := auxIntToInt32(v_0_0.AuxInt)
v_0_1 := v_0.Args[1]
if v_0_1.Op != OpMIPSMOVWconst {
break
}
- d := v_0_1.AuxInt
+ d := auxIntToInt32(v_0_1.AuxInt)
v.reset(OpMIPSMOVWconst)
- v.AuxInt = int64(int32(uint32(c) / uint32(d)))
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d)))
return true
}
return false
// result: (NE cmp yes no)
for b.Controls[0].Op == OpMIPSXORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (NE cmp yes no)
for b.Controls[0].Op == OpMIPSXORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (NE cmp yes no)
for b.Controls[0].Op == OpMIPSXORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (NE cmp yes no)
for b.Controls[0].Op == OpMIPSXORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (NE cmp yes no)
for b.Controls[0].Op == OpMIPSXORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (NE cmp yes no)
for b.Controls[0].Op == OpMIPSXORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (NE x yes no)
for b.Controls[0].Op == OpMIPSSGTUconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
x := v_0.Args[0]
// result: (GEZ x yes no)
for b.Controls[0].Op == OpMIPSSGTconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
x := v_0.Args[0]
// result: (First yes no)
for b.Controls[0].Op == OpMIPSMOVWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
b.Reset(BlockFirst)
// result: (First no yes)
for b.Controls[0].Op == OpMIPSMOVWconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
if !(c != 0) {
break
}
}
case BlockMIPSGEZ:
// match: (GEZ (MOVWconst [c]) yes no)
- // cond: int32(c) >= 0
+ // cond: c >= 0
// result: (First yes no)
for b.Controls[0].Op == OpMIPSMOVWconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
- if !(int32(c) >= 0) {
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c >= 0) {
break
}
b.Reset(BlockFirst)
return true
}
// match: (GEZ (MOVWconst [c]) yes no)
- // cond: int32(c) < 0
+ // cond: c < 0
// result: (First no yes)
for b.Controls[0].Op == OpMIPSMOVWconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
- if !(int32(c) < 0) {
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c < 0) {
break
}
b.Reset(BlockFirst)
}
case BlockMIPSGTZ:
// match: (GTZ (MOVWconst [c]) yes no)
- // cond: int32(c) > 0
+ // cond: c > 0
// result: (First yes no)
for b.Controls[0].Op == OpMIPSMOVWconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
- if !(int32(c) > 0) {
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c > 0) {
break
}
b.Reset(BlockFirst)
return true
}
// match: (GTZ (MOVWconst [c]) yes no)
- // cond: int32(c) <= 0
+ // cond: c <= 0
// result: (First no yes)
for b.Controls[0].Op == OpMIPSMOVWconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
- if !(int32(c) <= 0) {
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c <= 0) {
break
}
b.Reset(BlockFirst)
}
case BlockMIPSLEZ:
// match: (LEZ (MOVWconst [c]) yes no)
- // cond: int32(c) <= 0
+ // cond: c <= 0
// result: (First yes no)
for b.Controls[0].Op == OpMIPSMOVWconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
- if !(int32(c) <= 0) {
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c <= 0) {
break
}
b.Reset(BlockFirst)
return true
}
// match: (LEZ (MOVWconst [c]) yes no)
- // cond: int32(c) > 0
+ // cond: c > 0
// result: (First no yes)
for b.Controls[0].Op == OpMIPSMOVWconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
- if !(int32(c) > 0) {
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c > 0) {
break
}
b.Reset(BlockFirst)
}
case BlockMIPSLTZ:
// match: (LTZ (MOVWconst [c]) yes no)
- // cond: int32(c) < 0
+ // cond: c < 0
// result: (First yes no)
for b.Controls[0].Op == OpMIPSMOVWconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
- if !(int32(c) < 0) {
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c < 0) {
break
}
b.Reset(BlockFirst)
return true
}
// match: (LTZ (MOVWconst [c]) yes no)
- // cond: int32(c) >= 0
+ // cond: c >= 0
// result: (First no yes)
for b.Controls[0].Op == OpMIPSMOVWconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
- if !(int32(c) >= 0) {
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c >= 0) {
break
}
b.Reset(BlockFirst)
// result: (EQ cmp yes no)
for b.Controls[0].Op == OpMIPSXORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (EQ cmp yes no)
for b.Controls[0].Op == OpMIPSXORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (EQ cmp yes no)
for b.Controls[0].Op == OpMIPSXORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (EQ cmp yes no)
for b.Controls[0].Op == OpMIPSXORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (EQ cmp yes no)
for b.Controls[0].Op == OpMIPSXORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (EQ cmp yes no)
for b.Controls[0].Op == OpMIPSXORconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
cmp := v_0.Args[0]
// result: (EQ x yes no)
for b.Controls[0].Op == OpMIPSSGTUconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 1 {
+ if auxIntToInt32(v_0.AuxInt) != 1 {
break
}
x := v_0.Args[0]
// result: (LTZ x yes no)
for b.Controls[0].Op == OpMIPSSGTconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
x := v_0.Args[0]
// result: (First no yes)
for b.Controls[0].Op == OpMIPSMOVWconst {
v_0 := b.Controls[0]
- if v_0.AuxInt != 0 {
+ if auxIntToInt32(v_0.AuxInt) != 0 {
break
}
b.Reset(BlockFirst)
// result: (First yes no)
for b.Controls[0].Op == OpMIPSMOVWconst {
v_0 := b.Controls[0]
- c := v_0.AuxInt
+ c := auxIntToInt32(v_0.AuxInt)
if !(c != 0) {
break
}