// It is created once, early during compilation,
// and shared across all compilations.
type Config struct {
- arch string // "amd64", etc.
- IntSize int64 // 4 or 8
- PtrSize int64 // 4 or 8
- RegSize int64 // 4 or 8
- lowerBlock func(*Block, *Config) bool // lowering function
- lowerValue func(*Value, *Config) bool // lowering function
- registers []Register // machine registers
- gpRegMask regMask // general purpose integer register mask
- fpRegMask regMask // floating point register mask
- specialRegMask regMask // special register mask
- FPReg int8 // register number of frame pointer, -1 if not used
- LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used
- hasGReg bool // has hardware g register
- fe Frontend // callbacks into compiler frontend
- ctxt *obj.Link // Generic arch information
- optimize bool // Do optimization
- noDuffDevice bool // Don't use Duff's device
- nacl bool // GOOS=nacl
- use387 bool // GO386=387
- OldArch bool // True for older versions of architecture, e.g. true for PPC64BE, false for PPC64LE
- NeedsFpScratch bool // No direct move between GP and FP register sets
- BigEndian bool //
- sparsePhiCutoff uint64 // Sparse phi location algorithm used above this #blocks*#variables score
+ arch string // "amd64", etc.
+ IntSize int64 // 4 or 8
+ PtrSize int64 // 4 or 8
+ RegSize int64 // 4 or 8
+ lowerBlock blockRewriter // lowering function
+ lowerValue valueRewriter // lowering function
+ registers []Register // machine registers
+ gpRegMask regMask // general purpose integer register mask
+ fpRegMask regMask // floating point register mask
+ specialRegMask regMask // special register mask
+ FPReg int8 // register number of frame pointer, -1 if not used
+ LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used
+ hasGReg bool // has hardware g register
+ fe Frontend // callbacks into compiler frontend
+ ctxt *obj.Link // Generic arch information
+ optimize bool // Do optimization
+ noDuffDevice bool // Don't use Duff's device
+ nacl bool // GOOS=nacl
+ use387 bool // GO386=387
+ OldArch bool // True for older versions of architecture, e.g. true for PPC64BE, false for PPC64LE
+ NeedsFpScratch bool // No direct move between GP and FP register sets
+	BigEndian       bool                       // target byte order is big-endian
+ sparsePhiCutoff uint64 // Sparse phi location algorithm used above this #blocks*#variables score
}
+type (
+ blockRewriter func(*Block) bool
+ valueRewriter func(*Value) bool
+)
+
type TypeSource interface {
TypeBool() Type
TypeInt8() Type
(Neg32 x) -> (NEGL x)
(Neg16 x) -> (NEGL x)
(Neg8 x) -> (NEGL x)
-(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
-(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <fe.TypeFloat32()> [f2i(math.Copysign(0, -1))]))
+(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <fe.TypeFloat64()> [f2i(math.Copysign(0, -1))]))
(Neg32F x) && config.use387 -> (FCHS x)
(Neg64F x) && config.use387 -> (FCHS x)
(Neg32 x) -> (NEGL x)
(Neg16 x) -> (NEGL x)
(Neg8 x) -> (NEGL x)
-(Neg32F x) -> (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
-(Neg64F x) -> (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+(Neg32F x) -> (PXOR x (MOVSSconst <fe.TypeFloat32()> [f2i(math.Copysign(0, -1))]))
+(Neg64F x) -> (PXOR x (MOVSDconst <fe.TypeFloat64()> [f2i(math.Copysign(0, -1))]))
(Com64 x) -> (NOTQ x)
(Com32 x) -> (NOTL x)
// Lowering other arithmetic
(Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))
-(Ctz32 x) -> (Select0 (BSFQ (ORQ <config.Frontend().TypeUInt64()> (MOVQconst [1<<32]) x)))
+(Ctz32 x) -> (Select0 (BSFQ (ORQ <fe.TypeUInt64()> (MOVQconst [1<<32]) x)))
(BitLen64 <t> x) -> (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <TypeFlags> (BSRQ x))))
-(BitLen32 x) -> (BitLen64 (MOVLQZX <config.Frontend().TypeUInt64()> x))
+(BitLen32 x) -> (BitLen64 (MOVLQZX <fe.TypeUInt64()> x))
(Bswap64 x) -> (BSWAPQ x)
(Bswap32 x) -> (BSWAPL x)
// Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load.
// TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those?
-(AtomicStore32 ptr val mem) -> (Select1 (XCHGL <MakeTuple(config.Frontend().TypeUInt32(),TypeMem)> val ptr mem))
-(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeUInt64(),TypeMem)> val ptr mem))
-(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
-(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
+(AtomicStore32 ptr val mem) -> (Select1 (XCHGL <MakeTuple(fe.TypeUInt32(),TypeMem)> val ptr mem))
+(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <MakeTuple(fe.TypeUInt64(),TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <MakeTuple(fe.TypeBytePtr(),TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <MakeTuple(fe.TypeBytePtr(),TypeMem)> val ptr mem))
// Atomic exchanges.
(AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem)
(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF cmp yes no)
// Disabled because it interferes with the pattern match above and makes worse code.
-// (SETNEF x) -> (ORQ (SETNE <config.Frontend().TypeInt8()> x) (SETNAN <config.Frontend().TypeInt8()> x))
-// (SETEQF x) -> (ANDQ (SETEQ <config.Frontend().TypeInt8()> x) (SETORD <config.Frontend().TypeInt8()> x))
+// (SETNEF x) -> (ORQ (SETNE <fe.TypeInt8()> x) (SETNAN <fe.TypeInt8()> x))
+// (SETEQF x) -> (ANDQ (SETEQ <fe.TypeInt8()> x) (SETORD <fe.TypeInt8()> x))
// fold constants into instructions
(ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
(Mul32uhilo x y) -> (MULLU x y)
(Div32 x y) ->
- (SUB (XOR <config.fe.TypeUInt32()> // negate the result if one operand is negative
- (Select0 <config.fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
- (SUB <config.fe.TypeUInt32()> (XOR x <config.fe.TypeUInt32()> (Signmask x)) (Signmask x)) // negate x if negative
- (SUB <config.fe.TypeUInt32()> (XOR y <config.fe.TypeUInt32()> (Signmask y)) (Signmask y)))) // negate y if negative
- (Signmask (XOR <config.fe.TypeUInt32()> x y))) (Signmask (XOR <config.fe.TypeUInt32()> x y)))
-(Div32u x y) -> (Select0 <config.fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
+ (SUB (XOR <fe.TypeUInt32()> // negate the result if one operand is negative
+ (Select0 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
+ (SUB <fe.TypeUInt32()> (XOR x <fe.TypeUInt32()> (Signmask x)) (Signmask x)) // negate x if negative
+ (SUB <fe.TypeUInt32()> (XOR y <fe.TypeUInt32()> (Signmask y)) (Signmask y)))) // negate y if negative
+ (Signmask (XOR <fe.TypeUInt32()> x y))) (Signmask (XOR <fe.TypeUInt32()> x y)))
+(Div32u x y) -> (Select0 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
(Div16 x y) -> (Div32 (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) -> (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) -> (Div32 (SignExt8to32 x) (SignExt8to32 y))
(Div64F x y) -> (DIVD x y)
(Mod32 x y) ->
- (SUB (XOR <config.fe.TypeUInt32()> // negate the result if x is negative
- (Select1 <config.fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
- (SUB <config.fe.TypeUInt32()> (XOR <config.fe.TypeUInt32()> x (Signmask x)) (Signmask x)) // negate x if negative
- (SUB <config.fe.TypeUInt32()> (XOR <config.fe.TypeUInt32()> y (Signmask y)) (Signmask y)))) // negate y if negative
+ (SUB (XOR <fe.TypeUInt32()> // negate the result if x is negative
+ (Select1 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
+ (SUB <fe.TypeUInt32()> (XOR <fe.TypeUInt32()> x (Signmask x)) (Signmask x)) // negate x if negative
+ (SUB <fe.TypeUInt32()> (XOR <fe.TypeUInt32()> y (Signmask y)) (Signmask y)))) // negate y if negative
(Signmask x)) (Signmask x))
-(Mod32u x y) -> (Select1 <config.fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
+(Mod32u x y) -> (Select1 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
-(EqB x y) -> (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+(EqB x y) -> (XORconst [1] (XOR <fe.TypeBool()> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XORconst [1] x)
(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRLconst x [c])
(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLLconst x [c])
-(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
-(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
+(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLLconst x [c])
-(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
-(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
+(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
// large constant shifts
(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
// large constant signed right shift, we leave the sign bit
(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAconst x [31])
-(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
-(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [31])
// constants
(Const8 [val]) -> (MOVWconst [val])
(SignExt16to32 x) -> (MOVHreg x)
(Signmask x) -> (SRAconst x [31])
-(Zeromask x) -> (SRAconst (RSBshiftRL <config.fe.TypeInt32()> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
+(Zeromask x) -> (SRAconst (RSBshiftRL <fe.TypeInt32()> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
(Slicemask <t> x) -> (SRAconst (RSBconst <t> [0] x) [31])
// float <-> int conversion
(Hmul64 x y) -> (MULH x y)
(Hmul64u x y) -> (UMULH x y)
-(Hmul32 x y) -> (SRAconst (MULL <config.fe.TypeInt64()> x y) [32])
-(Hmul32u x y) -> (SRAconst (UMULL <config.fe.TypeUInt64()> x y) [32])
+(Hmul32 x y) -> (SRAconst (MULL <fe.TypeInt64()> x y) [32])
+(Hmul32u x y) -> (SRAconst (UMULL <fe.TypeUInt64()> x y) [32])
(Div64 x y) -> (DIV x y)
(Div64u x y) -> (UDIV x y)
(Ctz64 <t> x) -> (CLZ (RBIT <t> x))
(Ctz32 <t> x) -> (CLZW (RBITW <t> x))
-(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <config.fe.TypeInt()> x))
+(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <fe.TypeInt()> x))
(Bswap64 x) -> (REV x)
(Bswap32 x) -> (REVW x)
(BitRev64 x) -> (RBIT x)
(BitRev32 x) -> (RBITW x)
-(BitRev16 x) -> (SRLconst [48] (RBIT <config.fe.TypeUInt64()> x))
-(BitRev8 x) -> (SRLconst [56] (RBIT <config.fe.TypeUInt64()> x))
+(BitRev16 x) -> (SRLconst [48] (RBIT <fe.TypeUInt64()> x))
+(BitRev8 x) -> (SRLconst [56] (RBIT <fe.TypeUInt64()> x))
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
-(EqB x y) -> (XOR (MOVDconst [1]) (XOR <config.fe.TypeBool()> x y))
+(EqB x y) -> (XOR (MOVDconst [1]) (XOR <fe.TypeBool()> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XOR (MOVDconst [1]) x)
(Add64F x y) -> (ADDD x y)
(Select0 (Add32carry <t> x y)) -> (ADD <t.FieldType(0)> x y)
-(Select1 (Add32carry <t> x y)) -> (SGTU <config.fe.TypeBool()> x (ADD <t.FieldType(0)> x y))
+(Select1 (Add32carry <t> x y)) -> (SGTU <fe.TypeBool()> x (ADD <t.FieldType(0)> x y))
(Add32withcarry <t> x y c) -> (ADD c (ADD <t> x y))
(SubPtr x y) -> (SUB x y)
(Sub64F x y) -> (SUBD x y)
(Select0 (Sub32carry <t> x y)) -> (SUB <t.FieldType(0)> x y)
-(Select1 (Sub32carry <t> x y)) -> (SGTU <config.fe.TypeBool()> (SUB <t.FieldType(0)> x y) x)
+(Select1 (Sub32carry <t> x y)) -> (SGTU <fe.TypeBool()> (SUB <t.FieldType(0)> x y) x)
(Sub32withcarry <t> x y c) -> (SUB (SUB <t> x y) c)
(Mul32 x y) -> (MUL x y)
(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SRAconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 -> (SRLconst x [c])
(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SLLconst x [c])
-(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
-(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
+(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SLLconst x [c])
-(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
-(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
+(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
// large constant shifts
(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
// large constant signed right shift, we leave the sign bit
(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 -> (SRAconst x [31])
-(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
-(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
+(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [31])
// shifts
// hardware instruction uses only the low 5 bits of the shift
(Rsh8Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh8Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-(Rsh32x32 x y) -> (SRA x ( CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh32x16 x y) -> (SRA x ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh32x8 x y) -> (SRA x ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh32x32 x y) -> (SRA x ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh32x16 x y) -> (SRA x ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh32x8 x y) -> (SRA x ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
-(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
-(Rsh8x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh8x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh8x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh8x32 x y)	-> (SRA (SignExt8to32 x) ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh8x16 x y)	-> (SRA (SignExt8to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh8x8 x y)	-> (SRA (SignExt8to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
// unary ops
(Neg32 x) -> (NEG x)
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
-(EqB x y) -> (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+(EqB x y) -> (XORconst [1] (XOR <fe.TypeBool()> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XORconst [1] x)
// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
(AtomicOr8 ptr val mem) && !config.BigEndian ->
- (LoweredAtomicOr (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
- (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val)
- (SLLconst <config.fe.TypeUInt32()> [3]
- (ANDconst <config.fe.TypeUInt32()> [3] ptr))) mem)
+ (LoweredAtomicOr (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
+ (SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
+ (SLLconst <fe.TypeUInt32()> [3]
+ (ANDconst <fe.TypeUInt32()> [3] ptr))) mem)
// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8))))
(AtomicAnd8 ptr val mem) && !config.BigEndian ->
- (LoweredAtomicAnd (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
- (OR <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val)
- (SLLconst <config.fe.TypeUInt32()> [3]
- (ANDconst <config.fe.TypeUInt32()> [3] ptr)))
- (NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()>
- (MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3]
- (ANDconst <config.fe.TypeUInt32()> [3]
- (XORconst <config.fe.TypeUInt32()> [3] ptr)))))) mem)
+ (LoweredAtomicAnd (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
+ (OR <fe.TypeUInt32()> (SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
+ (SLLconst <fe.TypeUInt32()> [3]
+ (ANDconst <fe.TypeUInt32()> [3] ptr)))
+ (NORconst [0] <fe.TypeUInt32()> (SLL <fe.TypeUInt32()>
+ (MOVWconst [0xff]) (SLLconst <fe.TypeUInt32()> [3]
+ (ANDconst <fe.TypeUInt32()> [3]
+ (XORconst <fe.TypeUInt32()> [3] ptr)))))) mem)
// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
(AtomicOr8 ptr val mem) && config.BigEndian ->
- (LoweredAtomicOr (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
- (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val)
- (SLLconst <config.fe.TypeUInt32()> [3]
- (ANDconst <config.fe.TypeUInt32()> [3]
- (XORconst <config.fe.TypeUInt32()> [3] ptr)))) mem)
+ (LoweredAtomicOr (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
+ (SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
+ (SLLconst <fe.TypeUInt32()> [3]
+ (ANDconst <fe.TypeUInt32()> [3]
+ (XORconst <fe.TypeUInt32()> [3] ptr)))) mem)
// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8))))
(AtomicAnd8 ptr val mem) && config.BigEndian ->
- (LoweredAtomicAnd (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
- (OR <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val)
- (SLLconst <config.fe.TypeUInt32()> [3]
- (ANDconst <config.fe.TypeUInt32()> [3]
- (XORconst <config.fe.TypeUInt32()> [3] ptr))))
- (NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()>
- (MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3]
- (ANDconst <config.fe.TypeUInt32()> [3]
- (XORconst <config.fe.TypeUInt32()> [3] ptr)))))) mem)
+ (LoweredAtomicAnd (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
+ (OR <fe.TypeUInt32()> (SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
+ (SLLconst <fe.TypeUInt32()> [3]
+ (ANDconst <fe.TypeUInt32()> [3]
+ (XORconst <fe.TypeUInt32()> [3] ptr))))
+ (NORconst [0] <fe.TypeUInt32()> (SLL <fe.TypeUInt32()>
+ (MOVWconst [0xff]) (SLLconst <fe.TypeUInt32()> [3]
+ (ANDconst <fe.TypeUInt32()> [3]
+ (XORconst <fe.TypeUInt32()> [3] ptr)))))) mem)
// checks
(Hmul64 x y) -> (Select0 (MULV x y))
(Hmul64u x y) -> (Select0 (MULVU x y))
-(Hmul32 x y) -> (SRAVconst (Select1 <config.fe.TypeInt64()> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
-(Hmul32u x y) -> (SRLVconst (Select1 <config.fe.TypeUInt64()> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+(Hmul32 x y) -> (SRAVconst (Select1 <fe.TypeInt64()> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+(Hmul32u x y) -> (SRLVconst (Select1 <fe.TypeUInt64()> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
(Div64 x y) -> (Select1 (DIVV x y))
(Div64u x y) -> (Select1 (DIVVU x y))
// shifts
// hardware instruction uses only the low 6 bits of the shift
// we compare to 64 to ensure Go semantics for large shifts
-(Lsh64x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
-(Lsh64x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh64x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh64x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
-
-(Lsh32x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
-(Lsh32x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh32x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh32x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
-
-(Lsh16x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
-(Lsh16x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh16x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh16x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
-
-(Lsh8x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
-(Lsh8x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh8x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh8x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
-
-(Rsh64Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> x y))
-(Rsh64Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
-(Rsh64Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
-(Rsh64Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
-
-(Rsh32Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
-(Rsh32Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
-(Rsh32Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
-(Rsh32Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
-
-(Rsh16Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
-(Rsh16Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
-(Rsh16Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
-(Rsh16Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
-
-(Rsh8Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
-(Rsh8Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
-(Rsh8Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
-(Rsh8Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
-
-(Rsh64x64 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
-(Rsh64x32 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
-(Rsh64x16 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
-(Rsh64x8 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
-
-(Rsh32x64 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
-(Rsh32x32 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
-(Rsh32x16 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
-(Rsh32x8 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
-
-(Rsh16x64 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
-(Rsh16x32 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
-(Rsh16x16 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
-(Rsh16x8 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
-
-(Rsh8x64 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
-(Rsh8x32 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
-(Rsh8x16 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
-(Rsh8x8 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
+(Lsh64x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+(Lsh64x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh32x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+(Lsh32x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh16x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+(Lsh16x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh8x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+(Lsh8x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Rsh64Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> x y))
+(Rsh64Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
+
+(Rsh32Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+(Rsh32Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
+
+(Rsh16Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+(Rsh16Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
+
+(Rsh8Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+(Rsh8Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+
+(Rsh64x64 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
+(Rsh64x32 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+(Rsh64x16 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+(Rsh64x8 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
+
+(Rsh32x64 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
+(Rsh32x32 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+(Rsh32x16 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+(Rsh32x8 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
+
+(Rsh16x64 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
+(Rsh16x32 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+(Rsh16x16 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+(Rsh16x8 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
+
+(Rsh8x64 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
+(Rsh8x32 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+(Rsh8x16 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+(Rsh8x8 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
// unary ops
(Neg64 x) -> (NEGV x)
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
-(EqB x y) -> (XOR (MOVVconst [1]) (XOR <config.fe.TypeBool()> x y))
+(EqB x y) -> (XOR (MOVVconst [1]) (XOR <fe.TypeBool()> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XORconst [1] x)
(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])
-(Rsh64x64 x y) -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
-(Rsh64Ux64 x y) -> (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
-(Lsh64x64 x y) -> (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+(Rsh64x64 x y) -> (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+(Rsh64Ux64 x y) -> (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+(Lsh64x64 x y) -> (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
-(Rsh32x64 x y) -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
-(Rsh32Ux64 x y) -> (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
-(Lsh32x64 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+(Rsh32x64 x y) -> (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+(Rsh32Ux64 x y) -> (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+(Lsh32x64 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
-(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
-(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
-(Lsh16x64 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+(Lsh16x64 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
-(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
-(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
-(Lsh8x64 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+(Lsh8x64 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
-(Rsh64x32 x y) -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
-(Rsh64Ux32 x y) -> (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
-(Lsh64x32 x y) -> (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+(Rsh64x32 x y) -> (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+(Rsh64Ux32 x y) -> (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+(Lsh64x32 x y) -> (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
-(Rsh32x32 x y) -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
-(Rsh32Ux32 x y) -> (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
-(Lsh32x32 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+(Rsh32x32 x y) -> (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+(Rsh32Ux32 x y) -> (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+(Lsh32x32 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
-(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
-(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
-(Lsh16x32 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+(Lsh16x32 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
-(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
-(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
-(Lsh8x32 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+(Lsh8x32 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
-(Rsh64x16 x y) -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
-(Rsh64Ux16 x y) -> (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
-(Lsh64x16 x y) -> (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+(Rsh64x16 x y) -> (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+(Rsh64Ux16 x y) -> (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+(Lsh64x16 x y) -> (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
-(Rsh32x16 x y) -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
-(Rsh32Ux16 x y) -> (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
-(Lsh32x16 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+(Rsh32x16 x y) -> (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+(Rsh32Ux16 x y) -> (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+(Lsh32x16 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
-(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
-(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
-(Lsh16x16 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+(Lsh16x16 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
-(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
-(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
-(Lsh8x16 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+(Lsh8x16 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
-(Rsh64x8 x y) -> (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
-(Rsh64Ux8 x y) -> (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
-(Lsh64x8 x y) -> (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+(Rsh64x8 x y) -> (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+(Rsh64Ux8 x y) -> (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+(Lsh64x8 x y) -> (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
-(Rsh32x8 x y) -> (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
-(Rsh32Ux8 x y) -> (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
-(Lsh32x8 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+(Rsh32x8 x y) -> (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+(Rsh32Ux8 x y) -> (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+(Lsh32x8 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
-(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
-(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
-(Lsh16x8 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+(Lsh16x8 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
-(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
-(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
-(Lsh8x8 x y) -> (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+(Lsh8x8 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
// Cleaning up shift ops when input is masked
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
(Addr {sym} base) -> (MOVDaddr {sym} base)
// (Addr {sym} base) -> (ADDconst {sym} base)
-(OffPtr [off] ptr) -> (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr)
+(OffPtr [off] ptr) -> (ADD (MOVDconst <fe.TypeInt64()> [off]) ptr)
(And64 x y) -> (AND x y)
(And32 x y) -> (AND x y)
(If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GTF cmp yes no)
(If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GEF cmp yes no)
-(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg <config.fe.TypeBool()> cond)) yes no)
+(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg <fe.TypeBool()> cond)) yes no)
// ***************************
// Above: lowering rules
(Load <t> ptr mem) && t.IsComplex() && t.Size() == 8 ->
(ComplexMake
- (Load <config.fe.TypeFloat32()> ptr mem)
- (Load <config.fe.TypeFloat32()>
- (OffPtr <config.fe.TypeFloat32().PtrTo()> [4] ptr)
+ (Load <fe.TypeFloat32()> ptr mem)
+ (Load <fe.TypeFloat32()>
+ (OffPtr <fe.TypeFloat32().PtrTo()> [4] ptr)
mem)
)
(Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 8 ->
- (Store {config.fe.TypeFloat32()}
- (OffPtr <config.fe.TypeFloat32().PtrTo()> [4] dst)
+ (Store {fe.TypeFloat32()}
+ (OffPtr <fe.TypeFloat32().PtrTo()> [4] dst)
imag
- (Store {config.fe.TypeFloat32()} dst real mem))
+ (Store {fe.TypeFloat32()} dst real mem))
(Load <t> ptr mem) && t.IsComplex() && t.Size() == 16 ->
(ComplexMake
- (Load <config.fe.TypeFloat64()> ptr mem)
- (Load <config.fe.TypeFloat64()>
- (OffPtr <config.fe.TypeFloat64().PtrTo()> [8] ptr)
+ (Load <fe.TypeFloat64()> ptr mem)
+ (Load <fe.TypeFloat64()>
+ (OffPtr <fe.TypeFloat64().PtrTo()> [8] ptr)
mem)
)
(Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 16 ->
- (Store {config.fe.TypeFloat64()}
- (OffPtr <config.fe.TypeFloat64().PtrTo()> [8] dst)
+ (Store {fe.TypeFloat64()}
+ (OffPtr <fe.TypeFloat64().PtrTo()> [8] dst)
imag
- (Store {config.fe.TypeFloat64()} dst real mem))
+ (Store {fe.TypeFloat64()} dst real mem))
// string ops
(StringPtr (StringMake ptr _)) -> ptr
(Load <t> ptr mem) && t.IsString() ->
(StringMake
- (Load <config.fe.TypeBytePtr()> ptr mem)
- (Load <config.fe.TypeInt()>
- (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
+ (Load <fe.TypeBytePtr()> ptr mem)
+ (Load <fe.TypeInt()>
+ (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
mem))
(Store dst (StringMake ptr len) mem) ->
- (Store {config.fe.TypeInt()}
- (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst)
+ (Store {fe.TypeInt()}
+ (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst)
len
- (Store {config.fe.TypeBytePtr()} dst ptr mem))
+ (Store {fe.TypeBytePtr()} dst ptr mem))
// slice ops
(SlicePtr (SliceMake ptr _ _ )) -> ptr
(Load <t> ptr mem) && t.IsSlice() ->
(SliceMake
(Load <t.ElemType().PtrTo()> ptr mem)
- (Load <config.fe.TypeInt()>
- (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
+ (Load <fe.TypeInt()>
+ (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
mem)
- (Load <config.fe.TypeInt()>
- (OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr)
+ (Load <fe.TypeInt()>
+ (OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr)
mem))
(Store dst (SliceMake ptr len cap) mem) ->
- (Store {config.fe.TypeInt()}
- (OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] dst)
+ (Store {fe.TypeInt()}
+ (OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] dst)
cap
- (Store {config.fe.TypeInt()}
- (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst)
+ (Store {fe.TypeInt()}
+ (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst)
len
- (Store {config.fe.TypeBytePtr()} dst ptr mem)))
+ (Store {fe.TypeBytePtr()} dst ptr mem)))
// interface ops
(ITab (IMake itab _)) -> itab
(Load <t> ptr mem) && t.IsInterface() ->
(IMake
- (Load <config.fe.TypeBytePtr()> ptr mem)
- (Load <config.fe.TypeBytePtr()>
- (OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr)
+ (Load <fe.TypeBytePtr()> ptr mem)
+ (Load <fe.TypeBytePtr()>
+ (OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr)
mem))
(Store dst (IMake itab data) mem) ->
- (Store {config.fe.TypeBytePtr()}
- (OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst)
+ (Store {fe.TypeBytePtr()}
+ (OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst)
data
- (Store {config.fe.TypeUintptr()} dst itab mem))
+ (Store {fe.TypeUintptr()} dst itab mem))
(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() ->
(Int64Make
- (Load <config.fe.TypeInt32()> (OffPtr <config.fe.TypeInt32().PtrTo()> [4] ptr) mem)
- (Load <config.fe.TypeUInt32()> ptr mem))
+ (Load <fe.TypeInt32()> (OffPtr <fe.TypeInt32().PtrTo()> [4] ptr) mem)
+ (Load <fe.TypeUInt32()> ptr mem))
(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && !t.IsSigned() ->
(Int64Make
- (Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem)
- (Load <config.fe.TypeUInt32()> ptr mem))
+ (Load <fe.TypeUInt32()> (OffPtr <fe.TypeUInt32().PtrTo()> [4] ptr) mem)
+ (Load <fe.TypeUInt32()> ptr mem))
(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && t.IsSigned() ->
(Int64Make
- (Load <config.fe.TypeInt32()> ptr mem)
- (Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem))
+ (Load <fe.TypeInt32()> ptr mem)
+ (Load <fe.TypeUInt32()> (OffPtr <fe.TypeUInt32().PtrTo()> [4] ptr) mem))
(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && !t.IsSigned() ->
(Int64Make
- (Load <config.fe.TypeUInt32()> ptr mem)
- (Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem))
+ (Load <fe.TypeUInt32()> ptr mem)
+ (Load <fe.TypeUInt32()> (OffPtr <fe.TypeUInt32().PtrTo()> [4] ptr) mem))
(Store {t} dst (Int64Make hi lo) mem) && t.(Type).Size() == 8 && !config.BigEndian ->
(Store {hi.Type}
(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() ->
(Int64Make
- (Arg <config.fe.TypeInt32()> {n} [off+4])
- (Arg <config.fe.TypeUInt32()> {n} [off]))
+ (Arg <fe.TypeInt32()> {n} [off+4])
+ (Arg <fe.TypeUInt32()> {n} [off]))
(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() ->
(Int64Make
- (Arg <config.fe.TypeUInt32()> {n} [off+4])
- (Arg <config.fe.TypeUInt32()> {n} [off]))
+ (Arg <fe.TypeUInt32()> {n} [off+4])
+ (Arg <fe.TypeUInt32()> {n} [off]))
(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() ->
(Int64Make
- (Arg <config.fe.TypeInt32()> {n} [off])
- (Arg <config.fe.TypeUInt32()> {n} [off+4]))
+ (Arg <fe.TypeInt32()> {n} [off])
+ (Arg <fe.TypeUInt32()> {n} [off+4]))
(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() ->
(Int64Make
- (Arg <config.fe.TypeUInt32()> {n} [off])
- (Arg <config.fe.TypeUInt32()> {n} [off+4]))
+ (Arg <fe.TypeUInt32()> {n} [off])
+ (Arg <fe.TypeUInt32()> {n} [off+4]))
(Add64 x y) ->
(Int64Make
- (Add32withcarry <config.fe.TypeInt32()>
+ (Add32withcarry <fe.TypeInt32()>
(Int64Hi x)
(Int64Hi y)
(Select1 <TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y))))
- (Select0 <config.fe.TypeUInt32()> (Add32carry (Int64Lo x) (Int64Lo y))))
+ (Select0 <fe.TypeUInt32()> (Add32carry (Int64Lo x) (Int64Lo y))))
(Sub64 x y) ->
(Int64Make
- (Sub32withcarry <config.fe.TypeInt32()>
+ (Sub32withcarry <fe.TypeInt32()>
(Int64Hi x)
(Int64Hi y)
(Select1 <TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y))))
- (Select0 <config.fe.TypeUInt32()> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ (Select0 <fe.TypeUInt32()> (Sub32carry (Int64Lo x) (Int64Lo y))))
(Mul64 x y) ->
(Int64Make
- (Add32 <config.fe.TypeUInt32()>
- (Mul32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Hi y))
- (Add32 <config.fe.TypeUInt32()>
- (Mul32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Lo y))
- (Select0 <config.fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
- (Select1 <config.fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
+ (Add32 <fe.TypeUInt32()>
+ (Mul32 <fe.TypeUInt32()> (Int64Lo x) (Int64Hi y))
+ (Add32 <fe.TypeUInt32()>
+ (Mul32 <fe.TypeUInt32()> (Int64Hi x) (Int64Lo y))
+ (Select0 <fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
+ (Select1 <fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
(And64 x y) ->
(Int64Make
- (And32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
- (And32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+ (And32 <fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
+ (And32 <fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
(Or64 x y) ->
(Int64Make
- (Or32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
- (Or32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+ (Or32 <fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
+ (Or32 <fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
(Xor64 x y) ->
(Int64Make
- (Xor32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
- (Xor32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+ (Xor32 <fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
+ (Xor32 <fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
(Neg64 <t> x) -> (Sub64 (Const64 <t> [0]) x)
(Com64 x) ->
(Int64Make
- (Com32 <config.fe.TypeUInt32()> (Int64Hi x))
- (Com32 <config.fe.TypeUInt32()> (Int64Lo x)))
+ (Com32 <fe.TypeUInt32()> (Int64Hi x))
+ (Com32 <fe.TypeUInt32()> (Int64Lo x)))
(Ctz64 x) ->
- (Add32 <config.fe.TypeUInt32()>
- (Ctz32 <config.fe.TypeUInt32()> (Int64Lo x))
- (And32 <config.fe.TypeUInt32()>
- (Com32 <config.fe.TypeUInt32()> (Zeromask (Int64Lo x)))
- (Ctz32 <config.fe.TypeUInt32()> (Int64Hi x))))
+ (Add32 <fe.TypeUInt32()>
+ (Ctz32 <fe.TypeUInt32()> (Int64Lo x))
+ (And32 <fe.TypeUInt32()>
+ (Com32 <fe.TypeUInt32()> (Zeromask (Int64Lo x)))
+ (Ctz32 <fe.TypeUInt32()> (Int64Hi x))))
(BitLen64 x) ->
- (Add32 <config.fe.TypeInt()>
- (BitLen32 <config.fe.TypeInt()> (Int64Hi x))
- (BitLen32 <config.fe.TypeInt()>
- (Or32 <config.fe.TypeUInt32()>
+ (Add32 <fe.TypeInt()>
+ (BitLen32 <fe.TypeInt()> (Int64Hi x))
+ (BitLen32 <fe.TypeInt()>
+ (Or32 <fe.TypeUInt32()>
(Int64Lo x)
(Zeromask (Int64Hi x)))))
(Bswap64 x) ->
(Int64Make
- (Bswap32 <config.fe.TypeUInt32()> (Int64Lo x))
- (Bswap32 <config.fe.TypeUInt32()> (Int64Hi x)))
+ (Bswap32 <fe.TypeUInt32()> (Int64Lo x))
+ (Bswap32 <fe.TypeUInt32()> (Int64Hi x)))
(SignExt32to64 x) -> (Int64Make (Signmask x) x)
(SignExt16to64 x) -> (SignExt32to64 (SignExt16to32 x))
(SignExt8to64 x) -> (SignExt32to64 (SignExt8to32 x))
-(ZeroExt32to64 x) -> (Int64Make (Const32 <config.fe.TypeUInt32()> [0]) x)
+(ZeroExt32to64 x) -> (Int64Make (Const32 <fe.TypeUInt32()> [0]) x)
(ZeroExt16to64 x) -> (ZeroExt32to64 (ZeroExt16to32 x))
(ZeroExt8to64 x) -> (ZeroExt32to64 (ZeroExt8to32 x))
// turn x64 non-constant shifts to x32 shifts
// if high 32-bit of the shift is nonzero, make a huge shift
(Lsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Lsh64x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ (Lsh64x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh64x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ (Rsh64x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh64Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh64Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ (Rsh64Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Lsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Lsh32x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ (Lsh32x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh32x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ (Rsh32x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh32Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh32Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ (Rsh32Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Lsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Lsh16x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ (Lsh16x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh16x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ (Rsh16x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh16Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh16Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ (Rsh16Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Lsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Lsh8x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ (Lsh8x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh8x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ (Rsh8x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
(Rsh8Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
- (Rsh8Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ (Rsh8Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
// 64x left shift
// result.hi = hi<<s | lo>>(32-s) | lo<<(s-32) // >> is unsigned, large shifts result 0
// result.lo = lo<<s
(Lsh64x32 (Int64Make hi lo) s) ->
(Int64Make
- (Or32 <config.fe.TypeUInt32()>
- (Or32 <config.fe.TypeUInt32()>
- (Lsh32x32 <config.fe.TypeUInt32()> hi s)
- (Rsh32Ux32 <config.fe.TypeUInt32()>
+ (Or32 <fe.TypeUInt32()>
+ (Or32 <fe.TypeUInt32()>
+ (Lsh32x32 <fe.TypeUInt32()> hi s)
+ (Rsh32Ux32 <fe.TypeUInt32()>
lo
- (Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s)))
- (Lsh32x32 <config.fe.TypeUInt32()>
+ (Sub32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [32]) s)))
+ (Lsh32x32 <fe.TypeUInt32()>
lo
- (Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32]))))
- (Lsh32x32 <config.fe.TypeUInt32()> lo s))
+ (Sub32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [32]))))
+ (Lsh32x32 <fe.TypeUInt32()> lo s))
(Lsh64x16 (Int64Make hi lo) s) ->
(Int64Make
- (Or32 <config.fe.TypeUInt32()>
- (Or32 <config.fe.TypeUInt32()>
- (Lsh32x16 <config.fe.TypeUInt32()> hi s)
- (Rsh32Ux16 <config.fe.TypeUInt32()>
+ (Or32 <fe.TypeUInt32()>
+ (Or32 <fe.TypeUInt32()>
+ (Lsh32x16 <fe.TypeUInt32()> hi s)
+ (Rsh32Ux16 <fe.TypeUInt32()>
lo
- (Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s)))
- (Lsh32x16 <config.fe.TypeUInt32()>
+ (Sub16 <fe.TypeUInt16()> (Const16 <fe.TypeUInt16()> [32]) s)))
+ (Lsh32x16 <fe.TypeUInt32()>
lo
- (Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32]))))
- (Lsh32x16 <config.fe.TypeUInt32()> lo s))
+ (Sub16 <fe.TypeUInt16()> s (Const16 <fe.TypeUInt16()> [32]))))
+ (Lsh32x16 <fe.TypeUInt32()> lo s))
(Lsh64x8 (Int64Make hi lo) s) ->
(Int64Make
- (Or32 <config.fe.TypeUInt32()>
- (Or32 <config.fe.TypeUInt32()>
- (Lsh32x8 <config.fe.TypeUInt32()> hi s)
- (Rsh32Ux8 <config.fe.TypeUInt32()>
+ (Or32 <fe.TypeUInt32()>
+ (Or32 <fe.TypeUInt32()>
+ (Lsh32x8 <fe.TypeUInt32()> hi s)
+ (Rsh32Ux8 <fe.TypeUInt32()>
lo
- (Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s)))
- (Lsh32x8 <config.fe.TypeUInt32()>
+ (Sub8 <fe.TypeUInt8()> (Const8 <fe.TypeUInt8()> [32]) s)))
+ (Lsh32x8 <fe.TypeUInt32()>
lo
- (Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32]))))
- (Lsh32x8 <config.fe.TypeUInt32()> lo s))
+ (Sub8 <fe.TypeUInt8()> s (Const8 <fe.TypeUInt8()> [32]))))
+ (Lsh32x8 <fe.TypeUInt32()> lo s))
// 64x unsigned right shift
// result.hi = hi>>s
// result.lo = lo>>s | hi<<(32-s) | hi>>(s-32) // >> is unsigned, large shifts result 0
(Rsh64Ux32 (Int64Make hi lo) s) ->
(Int64Make
- (Rsh32Ux32 <config.fe.TypeUInt32()> hi s)
- (Or32 <config.fe.TypeUInt32()>
- (Or32 <config.fe.TypeUInt32()>
- (Rsh32Ux32 <config.fe.TypeUInt32()> lo s)
- (Lsh32x32 <config.fe.TypeUInt32()>
+ (Rsh32Ux32 <fe.TypeUInt32()> hi s)
+ (Or32 <fe.TypeUInt32()>
+ (Or32 <fe.TypeUInt32()>
+ (Rsh32Ux32 <fe.TypeUInt32()> lo s)
+ (Lsh32x32 <fe.TypeUInt32()>
hi
- (Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s)))
- (Rsh32Ux32 <config.fe.TypeUInt32()>
+ (Sub32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [32]) s)))
+ (Rsh32Ux32 <fe.TypeUInt32()>
hi
- (Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32])))))
+ (Sub32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [32])))))
(Rsh64Ux16 (Int64Make hi lo) s) ->
(Int64Make
- (Rsh32Ux16 <config.fe.TypeUInt32()> hi s)
- (Or32 <config.fe.TypeUInt32()>
- (Or32 <config.fe.TypeUInt32()>
- (Rsh32Ux16 <config.fe.TypeUInt32()> lo s)
- (Lsh32x16 <config.fe.TypeUInt32()>
+ (Rsh32Ux16 <fe.TypeUInt32()> hi s)
+ (Or32 <fe.TypeUInt32()>
+ (Or32 <fe.TypeUInt32()>
+ (Rsh32Ux16 <fe.TypeUInt32()> lo s)
+ (Lsh32x16 <fe.TypeUInt32()>
hi
- (Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s)))
- (Rsh32Ux16 <config.fe.TypeUInt32()>
+ (Sub16 <fe.TypeUInt16()> (Const16 <fe.TypeUInt16()> [32]) s)))
+ (Rsh32Ux16 <fe.TypeUInt32()>
hi
- (Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32])))))
+ (Sub16 <fe.TypeUInt16()> s (Const16 <fe.TypeUInt16()> [32])))))
(Rsh64Ux8 (Int64Make hi lo) s) ->
(Int64Make
- (Rsh32Ux8 <config.fe.TypeUInt32()> hi s)
- (Or32 <config.fe.TypeUInt32()>
- (Or32 <config.fe.TypeUInt32()>
- (Rsh32Ux8 <config.fe.TypeUInt32()> lo s)
- (Lsh32x8 <config.fe.TypeUInt32()>
+ (Rsh32Ux8 <fe.TypeUInt32()> hi s)
+ (Or32 <fe.TypeUInt32()>
+ (Or32 <fe.TypeUInt32()>
+ (Rsh32Ux8 <fe.TypeUInt32()> lo s)
+ (Lsh32x8 <fe.TypeUInt32()>
hi
- (Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s)))
- (Rsh32Ux8 <config.fe.TypeUInt32()>
+ (Sub8 <fe.TypeUInt8()> (Const8 <fe.TypeUInt8()> [32]) s)))
+ (Rsh32Ux8 <fe.TypeUInt32()>
hi
- (Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32])))))
+ (Sub8 <fe.TypeUInt8()> s (Const8 <fe.TypeUInt8()> [32])))))
// 64x signed right shift
// result.hi = hi>>s
// result.lo = lo>>s | hi<<(32-s) | (hi>>(s-32))&zeromask(s>>5) // hi>>(s-32) is signed, large shifts result 0/-1
(Rsh64x32 (Int64Make hi lo) s) ->
(Int64Make
- (Rsh32x32 <config.fe.TypeUInt32()> hi s)
- (Or32 <config.fe.TypeUInt32()>
- (Or32 <config.fe.TypeUInt32()>
- (Rsh32Ux32 <config.fe.TypeUInt32()> lo s)
- (Lsh32x32 <config.fe.TypeUInt32()>
+ (Rsh32x32 <fe.TypeUInt32()> hi s)
+ (Or32 <fe.TypeUInt32()>
+ (Or32 <fe.TypeUInt32()>
+ (Rsh32Ux32 <fe.TypeUInt32()> lo s)
+ (Lsh32x32 <fe.TypeUInt32()>
hi
- (Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s)))
- (And32 <config.fe.TypeUInt32()>
- (Rsh32x32 <config.fe.TypeUInt32()>
+ (Sub32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [32]) s)))
+ (And32 <fe.TypeUInt32()>
+ (Rsh32x32 <fe.TypeUInt32()>
hi
- (Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32])))
+ (Sub32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [32])))
(Zeromask
- (Rsh32Ux32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [5]))))))
+ (Rsh32Ux32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [5]))))))
(Rsh64x16 (Int64Make hi lo) s) ->
(Int64Make
- (Rsh32x16 <config.fe.TypeUInt32()> hi s)
- (Or32 <config.fe.TypeUInt32()>
- (Or32 <config.fe.TypeUInt32()>
- (Rsh32Ux16 <config.fe.TypeUInt32()> lo s)
- (Lsh32x16 <config.fe.TypeUInt32()>
+ (Rsh32x16 <fe.TypeUInt32()> hi s)
+ (Or32 <fe.TypeUInt32()>
+ (Or32 <fe.TypeUInt32()>
+ (Rsh32Ux16 <fe.TypeUInt32()> lo s)
+ (Lsh32x16 <fe.TypeUInt32()>
hi
- (Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s)))
- (And32 <config.fe.TypeUInt32()>
- (Rsh32x16 <config.fe.TypeUInt32()>
+ (Sub16 <fe.TypeUInt16()> (Const16 <fe.TypeUInt16()> [32]) s)))
+ (And32 <fe.TypeUInt32()>
+ (Rsh32x16 <fe.TypeUInt32()>
hi
- (Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32])))
+ (Sub16 <fe.TypeUInt16()> s (Const16 <fe.TypeUInt16()> [32])))
(Zeromask
(ZeroExt16to32
- (Rsh16Ux32 <config.fe.TypeUInt16()> s (Const32 <config.fe.TypeUInt32()> [5])))))))
+ (Rsh16Ux32 <fe.TypeUInt16()> s (Const32 <fe.TypeUInt32()> [5])))))))
(Rsh64x8 (Int64Make hi lo) s) ->
(Int64Make
- (Rsh32x8 <config.fe.TypeUInt32()> hi s)
- (Or32 <config.fe.TypeUInt32()>
- (Or32 <config.fe.TypeUInt32()>
- (Rsh32Ux8 <config.fe.TypeUInt32()> lo s)
- (Lsh32x8 <config.fe.TypeUInt32()>
+ (Rsh32x8 <fe.TypeUInt32()> hi s)
+ (Or32 <fe.TypeUInt32()>
+ (Or32 <fe.TypeUInt32()>
+ (Rsh32Ux8 <fe.TypeUInt32()> lo s)
+ (Lsh32x8 <fe.TypeUInt32()>
hi
- (Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s)))
- (And32 <config.fe.TypeUInt32()>
- (Rsh32x8 <config.fe.TypeUInt32()>
+ (Sub8 <fe.TypeUInt8()> (Const8 <fe.TypeUInt8()> [32]) s)))
+ (And32 <fe.TypeUInt32()>
+ (Rsh32x8 <fe.TypeUInt32()>
hi
- (Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32])))
+ (Sub8 <fe.TypeUInt8()> s (Const8 <fe.TypeUInt8()> [32])))
(Zeromask
(ZeroExt8to32
- (Rsh8Ux32 <config.fe.TypeUInt8()> s (Const32 <config.fe.TypeUInt32()> [5])))))))
+ (Rsh8Ux32 <fe.TypeUInt8()> s (Const32 <fe.TypeUInt32()> [5])))))))
// 64xConst32 shifts
// we probably do not need them -- lateopt may take care of them just fine
//
//(Lsh64x32 x (Const32 [c])) && c < 64 && c > 32 ->
// (Int64Make
-// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [c-32]))
-// (Const32 <config.fe.TypeUInt32()> [0]))
+// (Lsh32x32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [c-32]))
+// (Const32 <fe.TypeUInt32()> [0]))
//(Rsh64x32 x (Const32 [c])) && c < 64 && c > 32 ->
// (Int64Make
// (Signmask (Int64Hi x))
-// (Rsh32x32 <config.fe.TypeInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c-32])))
+// (Rsh32x32 <fe.TypeInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c-32])))
//(Rsh64Ux32 x (Const32 [c])) && c < 64 && c > 32 ->
// (Int64Make
-// (Const32 <config.fe.TypeUInt32()> [0])
-// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c-32])))
+// (Const32 <fe.TypeUInt32()> [0])
+// (Rsh32Ux32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c-32])))
//
-//(Lsh64x32 x (Const32 [32])) -> (Int64Make (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [0]))
+//(Lsh64x32 x (Const32 [32])) -> (Int64Make (Int64Lo x) (Const32 <fe.TypeUInt32()> [0]))
//(Rsh64x32 x (Const32 [32])) -> (Int64Make (Signmask (Int64Hi x)) (Int64Hi x))
-//(Rsh64Ux32 x (Const32 [32])) -> (Int64Make (Const32 <config.fe.TypeUInt32()> [0]) (Int64Hi x))
+//(Rsh64Ux32 x (Const32 [32])) -> (Int64Make (Const32 <fe.TypeUInt32()> [0]) (Int64Hi x))
//
//(Lsh64x32 x (Const32 [c])) && c < 32 && c > 0 ->
// (Int64Make
-// (Or32 <config.fe.TypeUInt32()>
-// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c]))
-// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [32-c])))
-// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [c])))
+// (Or32 <fe.TypeUInt32()>
+// (Lsh32x32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c]))
+// (Rsh32Ux32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [32-c])))
+// (Lsh32x32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [c])))
//(Rsh64x32 x (Const32 [c])) && c < 32 && c > 0 ->
// (Int64Make
-// (Rsh32x32 <config.fe.TypeInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c]))
-// (Or32 <config.fe.TypeUInt32()>
-// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [c]))
-// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [32-c]))))
+// (Rsh32x32 <fe.TypeInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c]))
+// (Or32 <fe.TypeUInt32()>
+// (Rsh32Ux32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [c]))
+// (Lsh32x32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [32-c]))))
//(Rsh64Ux32 x (Const32 [c])) && c < 32 && c > 0 ->
// (Int64Make
-// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [c]))
-// (Or32 <config.fe.TypeUInt32()>
-// (Rsh32Ux32 <config.fe.TypeUInt32()> (Int64Lo x) (Const32 <config.fe.TypeUInt32()> [c]))
-// (Lsh32x32 <config.fe.TypeUInt32()> (Int64Hi x) (Const32 <config.fe.TypeUInt32()> [32-c]))))
+// (Rsh32Ux32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c]))
+// (Or32 <fe.TypeUInt32()>
+// (Rsh32Ux32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [c]))
+// (Lsh32x32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [32-c]))))
//
//(Lsh64x32 x (Const32 [0])) -> x
//(Rsh64x32 x (Const32 [0])) -> x
//(Rsh64Ux32 x (Const32 [0])) -> x
(Const64 <t> [c]) && t.IsSigned() ->
- (Int64Make (Const32 <config.fe.TypeInt32()> [c>>32]) (Const32 <config.fe.TypeUInt32()> [int64(int32(c))]))
+ (Int64Make (Const32 <fe.TypeInt32()> [c>>32]) (Const32 <fe.TypeUInt32()> [int64(int32(c))]))
(Const64 <t> [c]) && !t.IsSigned() ->
- (Int64Make (Const32 <config.fe.TypeUInt32()> [c>>32]) (Const32 <config.fe.TypeUInt32()> [int64(int32(c))]))
+ (Int64Make (Const32 <fe.TypeUInt32()> [c>>32]) (Const32 <fe.TypeUInt32()> [int64(int32(c))]))
(Eq64 x y) ->
(AndB
(Mul64 (Const64 [-1]) x) -> (Neg64 x)
// Convert multiplication by a power of two to a shift.
-(Mul8 <t> n (Const8 [c])) && isPowerOfTwo(c) -> (Lsh8x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
-(Mul16 <t> n (Const16 [c])) && isPowerOfTwo(c) -> (Lsh16x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
-(Mul32 <t> n (Const32 [c])) && isPowerOfTwo(c) -> (Lsh32x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
-(Mul64 <t> n (Const64 [c])) && isPowerOfTwo(c) -> (Lsh64x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
-(Mul8 <t> n (Const8 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg8 (Lsh8x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(-c)])))
-(Mul16 <t> n (Const16 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg16 (Lsh16x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(-c)])))
-(Mul32 <t> n (Const32 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg32 (Lsh32x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(-c)])))
-(Mul64 <t> n (Const64 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg64 (Lsh64x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(-c)])))
+(Mul8 <t> n (Const8 [c])) && isPowerOfTwo(c) -> (Lsh8x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
+(Mul16 <t> n (Const16 [c])) && isPowerOfTwo(c) -> (Lsh16x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
+(Mul32 <t> n (Const32 [c])) && isPowerOfTwo(c) -> (Lsh32x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
+(Mul64 <t> n (Const64 [c])) && isPowerOfTwo(c) -> (Lsh64x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
+(Mul8 <t> n (Const8 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg8 (Lsh8x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
+(Mul16 <t> n (Const16 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg16 (Lsh16x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
+(Mul32 <t> n (Const32 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg32 (Lsh32x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
+(Mul64 <t> n (Const64 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg64 (Lsh64x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
(Mod8 (Const8 [c]) (Const8 [d])) && d != 0 -> (Const8 [int64(int8(c % d))])
(Mod16 (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(c % d))])
// ((x >> c1) << c2) >> c3
(Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Rsh64Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ -> (Rsh64Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
(Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Rsh32Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ -> (Rsh32Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
(Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Rsh16Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ -> (Rsh16Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
(Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Rsh8Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ -> (Rsh8Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
// ((x << c1) >> c2) << c3
(Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Lsh64x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ -> (Lsh64x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
(Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Lsh32x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ -> (Lsh32x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
(Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Lsh16x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ -> (Lsh16x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
(Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- -> (Lsh8x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ -> (Lsh8x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
// replace shifts with zero extensions
-(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (ZeroExt8to16 (Trunc16to8 <config.fe.TypeUInt8()> x))
-(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (ZeroExt8to32 (Trunc32to8 <config.fe.TypeUInt8()> x))
-(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (ZeroExt8to64 (Trunc64to8 <config.fe.TypeUInt8()> x))
-(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (ZeroExt16to32 (Trunc32to16 <config.fe.TypeUInt16()> x))
-(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (ZeroExt16to64 (Trunc64to16 <config.fe.TypeUInt16()> x))
-(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (ZeroExt32to64 (Trunc64to32 <config.fe.TypeUInt32()> x))
+(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (ZeroExt8to16 (Trunc16to8 <fe.TypeUInt8()> x))
+(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (ZeroExt8to32 (Trunc32to8 <fe.TypeUInt8()> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (ZeroExt8to64 (Trunc64to8 <fe.TypeUInt8()> x))
+(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (ZeroExt16to32 (Trunc32to16 <fe.TypeUInt16()> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (ZeroExt16to64 (Trunc64to16 <fe.TypeUInt16()> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (ZeroExt32to64 (Trunc64to32 <fe.TypeUInt32()> x))
// replace shifts with sign extensions
-(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (SignExt8to16 (Trunc16to8 <config.fe.TypeInt8()> x))
-(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (SignExt8to32 (Trunc32to8 <config.fe.TypeInt8()> x))
-(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (SignExt8to64 (Trunc64to8 <config.fe.TypeInt8()> x))
-(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (SignExt16to32 (Trunc32to16 <config.fe.TypeInt16()> x))
-(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (SignExt16to64 (Trunc64to16 <config.fe.TypeInt16()> x))
-(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (SignExt32to64 (Trunc64to32 <config.fe.TypeInt32()> x))
+(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (SignExt8to16 (Trunc16to8 <fe.TypeInt8()> x))
+(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (SignExt8to32 (Trunc32to8 <fe.TypeInt8()> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (SignExt8to64 (Trunc64to8 <fe.TypeInt8()> x))
+(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (SignExt16to32 (Trunc32to16 <fe.TypeInt16()> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (SignExt16to64 (Trunc64to16 <fe.TypeInt16()> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (SignExt32to64 (Trunc64to32 <fe.TypeInt32()> x))
// constant comparisons
(Eq64 (Const64 [c]) (Const64 [d])) -> (ConstBool [b2i(c == d)])
// indexing operations
// Note: bounds check has already been done
-(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <config.fe.TypeInt()> idx (Const32 <config.fe.TypeInt()> [t.ElemType().Size()])))
-(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <config.fe.TypeInt()> idx (Const64 <config.fe.TypeInt()> [t.ElemType().Size()])))
+(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <fe.TypeInt()> idx (Const32 <fe.TypeInt()> [t.ElemType().Size()])))
+(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <fe.TypeInt()> idx (Const64 <fe.TypeInt()> [t.ElemType().Size()])))
// struct operations
(StructSelect (StructMake1 x)) -> x
(StructSelect [2] (StructMake4 _ _ x _)) -> x
(StructSelect [3] (StructMake4 _ _ _ x)) -> x
-(Load <t> _ _) && t.IsStruct() && t.NumFields() == 0 && config.fe.CanSSA(t) ->
+(Load <t> _ _) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) ->
(StructMake0)
-(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t) ->
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) ->
(StructMake1
(Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
-(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t) ->
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) ->
(StructMake2
(Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
(Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
-(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t) ->
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) ->
(StructMake3
(Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
(Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
(Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
-(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t) ->
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) ->
(StructMake4
(Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
(Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
(Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem)
(Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
-(StructSelect [i] x:(Load <t> ptr mem)) && !config.fe.CanSSA(t) ->
+(StructSelect [i] x:(Load <t> ptr mem)) && !fe.CanSSA(t) ->
@x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
(Store _ (StructMake0) mem) -> mem
(StructSelect [0] x:(IData _)) -> x
// un-SSAable values use mem->mem copies
-(Store {t} dst (Load src mem) mem) && !config.fe.CanSSA(t.(Type)) ->
+(Store {t} dst (Load src mem) mem) && !fe.CanSSA(t.(Type)) ->
(Move {t} [t.(Type).Size()] dst src mem)
-(Store {t} dst (Load src mem) (VarDef {x} mem)) && !config.fe.CanSSA(t.(Type)) ->
+(Store {t} dst (Load src mem) (VarDef {x} mem)) && !fe.CanSSA(t.(Type)) ->
(Move {t} [t.(Type).Size()] dst src (VarDef {x} mem))
// array ops
(Load <t> _ _) && t.IsArray() && t.NumElem() == 0 ->
(ArrayMake0)
-(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && config.fe.CanSSA(t) ->
+(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) ->
(ArrayMake1 (Load <t.ElemType()> ptr mem))
(Store _ (ArrayMake0) mem) -> mem
(StringPtr (StringMake (Const64 <t> [c]) _)) -> (Const64 <t> [c])
(StringLen (StringMake _ (Const64 <t> [c]))) -> (Const64 <t> [c])
(ConstString {s}) && config.PtrSize == 4 && s.(string) == "" ->
- (StringMake (ConstNil) (Const32 <config.fe.TypeInt()> [0]))
+ (StringMake (ConstNil) (Const32 <fe.TypeInt()> [0]))
(ConstString {s}) && config.PtrSize == 8 && s.(string) == "" ->
- (StringMake (ConstNil) (Const64 <config.fe.TypeInt()> [0]))
+ (StringMake (ConstNil) (Const64 <fe.TypeInt()> [0]))
(ConstString {s}) && config.PtrSize == 4 && s.(string) != "" ->
(StringMake
- (Addr <config.fe.TypeBytePtr()> {config.fe.StringData(s.(string))}
+ (Addr <fe.TypeBytePtr()> {fe.StringData(s.(string))}
(SB))
- (Const32 <config.fe.TypeInt()> [int64(len(s.(string)))]))
+ (Const32 <fe.TypeInt()> [int64(len(s.(string)))]))
(ConstString {s}) && config.PtrSize == 8 && s.(string) != "" ->
(StringMake
- (Addr <config.fe.TypeBytePtr()> {config.fe.StringData(s.(string))}
+ (Addr <fe.TypeBytePtr()> {fe.StringData(s.(string))}
(SB))
- (Const64 <config.fe.TypeInt()> [int64(len(s.(string)))]))
+ (Const64 <fe.TypeInt()> [int64(len(s.(string)))]))
// slice ops
// Only a few slice rules are provided here. See dec.rules for the rest.
(ConstSlice) && config.PtrSize == 4 ->
(SliceMake
(ConstNil <v.Type.ElemType().PtrTo()>)
- (Const32 <config.fe.TypeInt()> [0])
- (Const32 <config.fe.TypeInt()> [0]))
+ (Const32 <fe.TypeInt()> [0])
+ (Const32 <fe.TypeInt()> [0]))
(ConstSlice) && config.PtrSize == 8 ->
(SliceMake
(ConstNil <v.Type.ElemType().PtrTo()>)
- (Const64 <config.fe.TypeInt()> [0])
- (Const64 <config.fe.TypeInt()> [0]))
+ (Const64 <fe.TypeInt()> [0])
+ (Const64 <fe.TypeInt()> [0]))
// interface ops
(ConstInterface) ->
(IMake
- (ConstNil <config.fe.TypeBytePtr()>)
- (ConstNil <config.fe.TypeBytePtr()>))
+ (ConstNil <fe.TypeBytePtr()>)
+ (ConstNil <fe.TypeBytePtr()>))
(NilCheck (GetG mem) mem) -> mem
// Decompose compound argument values
(Arg {n} [off]) && v.Type.IsString() ->
(StringMake
- (Arg <config.fe.TypeBytePtr()> {n} [off])
- (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]))
+ (Arg <fe.TypeBytePtr()> {n} [off])
+ (Arg <fe.TypeInt()> {n} [off+config.PtrSize]))
(Arg {n} [off]) && v.Type.IsSlice() ->
(SliceMake
(Arg <v.Type.ElemType().PtrTo()> {n} [off])
- (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize])
- (Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize]))
+ (Arg <fe.TypeInt()> {n} [off+config.PtrSize])
+ (Arg <fe.TypeInt()> {n} [off+2*config.PtrSize]))
(Arg {n} [off]) && v.Type.IsInterface() ->
(IMake
- (Arg <config.fe.TypeBytePtr()> {n} [off])
- (Arg <config.fe.TypeBytePtr()> {n} [off+config.PtrSize]))
+ (Arg <fe.TypeBytePtr()> {n} [off])
+ (Arg <fe.TypeBytePtr()> {n} [off+config.PtrSize]))
(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 16 ->
(ComplexMake
- (Arg <config.fe.TypeFloat64()> {n} [off])
- (Arg <config.fe.TypeFloat64()> {n} [off+8]))
+ (Arg <fe.TypeFloat64()> {n} [off])
+ (Arg <fe.TypeFloat64()> {n} [off+8]))
(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 8 ->
(ComplexMake
- (Arg <config.fe.TypeFloat32()> {n} [off])
- (Arg <config.fe.TypeFloat32()> {n} [off+4]))
+ (Arg <fe.TypeFloat32()> {n} [off])
+ (Arg <fe.TypeFloat32()> {n} [off+4]))
-(Arg <t>) && t.IsStruct() && t.NumFields() == 0 && config.fe.CanSSA(t) ->
+(Arg <t>) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) ->
(StructMake0)
-(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t) ->
+(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) ->
(StructMake1
(Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]))
-(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t) ->
+(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) ->
(StructMake2
(Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])
(Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]))
-(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t) ->
+(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) ->
(StructMake3
(Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])
(Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)])
(Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]))
-(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t) ->
+(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) ->
(StructMake4
(Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])
(Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)])
(Arg <t>) && t.IsArray() && t.NumElem() == 0 ->
(ArrayMake0)
-(Arg <t> {n} [off]) && t.IsArray() && t.NumElem() == 1 && config.fe.CanSSA(t) ->
+(Arg <t> {n} [off]) && t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) ->
(ArrayMake1 (Arg <t.ElemType()> {n} [off]))
// strength reduction of divide by a constant.
// See ../magic.go for a detailed description of these algorithms.
// Unsigned divide by power of 2. Strength reduce to a shift.
-(Div8u n (Const8 [c])) && isPowerOfTwo(c&0xff) -> (Rsh8Ux64 n (Const64 <config.fe.TypeUInt64()> [log2(c&0xff)]))
-(Div16u n (Const16 [c])) && isPowerOfTwo(c&0xffff) -> (Rsh16Ux64 n (Const64 <config.fe.TypeUInt64()> [log2(c&0xffff)]))
-(Div32u n (Const32 [c])) && isPowerOfTwo(c&0xffffffff) -> (Rsh32Ux64 n (Const64 <config.fe.TypeUInt64()> [log2(c&0xffffffff)]))
-(Div64u n (Const64 [c])) && isPowerOfTwo(c) -> (Rsh64Ux64 n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+(Div8u n (Const8 [c])) && isPowerOfTwo(c&0xff) -> (Rsh8Ux64 n (Const64 <fe.TypeUInt64()> [log2(c&0xff)]))
+(Div16u n (Const16 [c])) && isPowerOfTwo(c&0xffff) -> (Rsh16Ux64 n (Const64 <fe.TypeUInt64()> [log2(c&0xffff)]))
+(Div32u n (Const32 [c])) && isPowerOfTwo(c&0xffffffff) -> (Rsh32Ux64 n (Const64 <fe.TypeUInt64()> [log2(c&0xffffffff)]))
+(Div64u n (Const64 [c])) && isPowerOfTwo(c) -> (Rsh64Ux64 n (Const64 <fe.TypeUInt64()> [log2(c)]))
// Unsigned divide, not a power of 2. Strength reduce to a multiply.
// For 8-bit divides, we just do a direct 9-bit by 8-bit multiply.
(Div8u x (Const8 [c])) && umagicOK(8, c) ->
(Trunc32to8
- (Rsh32Ux64 <config.fe.TypeUInt32()>
- (Mul32 <config.fe.TypeUInt32()>
- (Const32 <config.fe.TypeUInt32()> [int64(1<<8+umagic(8,c).m)])
+ (Rsh32Ux64 <fe.TypeUInt32()>
+ (Mul32 <fe.TypeUInt32()>
+ (Const32 <fe.TypeUInt32()> [int64(1<<8+umagic(8,c).m)])
(ZeroExt8to32 x))
- (Const64 <config.fe.TypeUInt64()> [8+umagic(8,c).s])))
+ (Const64 <fe.TypeUInt64()> [8+umagic(8,c).s])))
// For 16-bit divides on 64-bit machines, we do a direct 17-bit by 16-bit multiply.
(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 8 ->
(Trunc64to16
- (Rsh64Ux64 <config.fe.TypeUInt64()>
- (Mul64 <config.fe.TypeUInt64()>
- (Const64 <config.fe.TypeUInt64()> [int64(1<<16+umagic(16,c).m)])
+ (Rsh64Ux64 <fe.TypeUInt64()>
+ (Mul64 <fe.TypeUInt64()>
+ (Const64 <fe.TypeUInt64()> [int64(1<<16+umagic(16,c).m)])
(ZeroExt16to64 x))
- (Const64 <config.fe.TypeUInt64()> [16+umagic(16,c).s])))
+ (Const64 <fe.TypeUInt64()> [16+umagic(16,c).s])))
// For 16-bit divides on 32-bit machines
(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 && umagic(16,c).m&1 == 0 ->
(Trunc32to16
- (Rsh32Ux64 <config.fe.TypeUInt32()>
- (Mul32 <config.fe.TypeUInt32()>
- (Const32 <config.fe.TypeUInt32()> [int64(1<<15+umagic(16,c).m/2)])
+ (Rsh32Ux64 <fe.TypeUInt32()>
+ (Mul32 <fe.TypeUInt32()>
+ (Const32 <fe.TypeUInt32()> [int64(1<<15+umagic(16,c).m/2)])
(ZeroExt16to32 x))
- (Const64 <config.fe.TypeUInt64()> [16+umagic(16,c).s-1])))
+ (Const64 <fe.TypeUInt64()> [16+umagic(16,c).s-1])))
(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 && c&1 == 0 ->
(Trunc32to16
- (Rsh32Ux64 <config.fe.TypeUInt32()>
- (Mul32 <config.fe.TypeUInt32()>
- (Const32 <config.fe.TypeUInt32()> [int64(1<<15+(umagic(16,c).m+1)/2)])
- (Rsh32Ux64 <config.fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <config.fe.TypeUInt64()> [1])))
- (Const64 <config.fe.TypeUInt64()> [16+umagic(16,c).s-2])))
+ (Rsh32Ux64 <fe.TypeUInt32()>
+ (Mul32 <fe.TypeUInt32()>
+ (Const32 <fe.TypeUInt32()> [int64(1<<15+(umagic(16,c).m+1)/2)])
+ (Rsh32Ux64 <fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <fe.TypeUInt64()> [1])))
+ (Const64 <fe.TypeUInt64()> [16+umagic(16,c).s-2])))
(Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 ->
(Trunc32to16
- (Rsh32Ux64 <config.fe.TypeUInt32()>
+ (Rsh32Ux64 <fe.TypeUInt32()>
(Avg32u
- (Lsh32x64 <config.fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <config.fe.TypeUInt64()> [16]))
- (Mul32 <config.fe.TypeUInt32()>
- (Const32 <config.fe.TypeUInt32()> [int64(umagic(16,c).m)])
+ (Lsh32x64 <fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <fe.TypeUInt64()> [16]))
+ (Mul32 <fe.TypeUInt32()>
+ (Const32 <fe.TypeUInt32()> [int64(umagic(16,c).m)])
(ZeroExt16to32 x)))
- (Const64 <config.fe.TypeUInt64()> [16+umagic(16,c).s-1])))
+ (Const64 <fe.TypeUInt64()> [16+umagic(16,c).s-1])))
// For 32-bit divides on 32-bit machines
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 && umagic(32,c).m&1 == 0 ->
- (Rsh32Ux64 <config.fe.TypeUInt32()>
- (Hmul32u <config.fe.TypeUInt32()>
- (Const32 <config.fe.TypeUInt32()> [int64(int32(1<<31+umagic(32,c).m/2))])
+ (Rsh32Ux64 <fe.TypeUInt32()>
+ (Hmul32u <fe.TypeUInt32()>
+ (Const32 <fe.TypeUInt32()> [int64(int32(1<<31+umagic(32,c).m/2))])
x)
- (Const64 <config.fe.TypeUInt64()> [umagic(32,c).s-1]))
+ (Const64 <fe.TypeUInt64()> [umagic(32,c).s-1]))
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 && c&1 == 0 ->
- (Rsh32Ux64 <config.fe.TypeUInt32()>
- (Hmul32u <config.fe.TypeUInt32()>
- (Const32 <config.fe.TypeUInt32()> [int64(int32(1<<31+(umagic(32,c).m+1)/2))])
- (Rsh32Ux64 <config.fe.TypeUInt32()> x (Const64 <config.fe.TypeUInt64()> [1])))
- (Const64 <config.fe.TypeUInt64()> [umagic(32,c).s-2]))
+ (Rsh32Ux64 <fe.TypeUInt32()>
+ (Hmul32u <fe.TypeUInt32()>
+ (Const32 <fe.TypeUInt32()> [int64(int32(1<<31+(umagic(32,c).m+1)/2))])
+ (Rsh32Ux64 <fe.TypeUInt32()> x (Const64 <fe.TypeUInt64()> [1])))
+ (Const64 <fe.TypeUInt64()> [umagic(32,c).s-2]))
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 ->
- (Rsh32Ux64 <config.fe.TypeUInt32()>
+ (Rsh32Ux64 <fe.TypeUInt32()>
(Avg32u
x
- (Hmul32u <config.fe.TypeUInt32()>
- (Const32 <config.fe.TypeUInt32()> [int64(int32(umagic(32,c).m))])
+ (Hmul32u <fe.TypeUInt32()>
+ (Const32 <fe.TypeUInt32()> [int64(int32(umagic(32,c).m))])
x))
- (Const64 <config.fe.TypeUInt64()> [umagic(32,c).s-1]))
+ (Const64 <fe.TypeUInt64()> [umagic(32,c).s-1]))
// For 32-bit divides on 64-bit machines
// We'll use a regular (non-hi) multiply for this case.
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 && umagic(32,c).m&1 == 0 ->
(Trunc64to32
- (Rsh64Ux64 <config.fe.TypeUInt64()>
- (Mul64 <config.fe.TypeUInt64()>
- (Const64 <config.fe.TypeUInt64()> [int64(1<<31+umagic(32,c).m/2)])
+ (Rsh64Ux64 <fe.TypeUInt64()>
+ (Mul64 <fe.TypeUInt64()>
+ (Const64 <fe.TypeUInt64()> [int64(1<<31+umagic(32,c).m/2)])
(ZeroExt32to64 x))
- (Const64 <config.fe.TypeUInt64()> [32+umagic(32,c).s-1])))
+ (Const64 <fe.TypeUInt64()> [32+umagic(32,c).s-1])))
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 && c&1 == 0 ->
(Trunc64to32
- (Rsh64Ux64 <config.fe.TypeUInt64()>
- (Mul64 <config.fe.TypeUInt64()>
- (Const64 <config.fe.TypeUInt64()> [int64(1<<31+(umagic(32,c).m+1)/2)])
- (Rsh64Ux64 <config.fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <config.fe.TypeUInt64()> [1])))
- (Const64 <config.fe.TypeUInt64()> [32+umagic(32,c).s-2])))
+ (Rsh64Ux64 <fe.TypeUInt64()>
+ (Mul64 <fe.TypeUInt64()>
+ (Const64 <fe.TypeUInt64()> [int64(1<<31+(umagic(32,c).m+1)/2)])
+ (Rsh64Ux64 <fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <fe.TypeUInt64()> [1])))
+ (Const64 <fe.TypeUInt64()> [32+umagic(32,c).s-2])))
(Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 ->
(Trunc64to32
- (Rsh64Ux64 <config.fe.TypeUInt64()>
+ (Rsh64Ux64 <fe.TypeUInt64()>
(Avg64u
- (Lsh64x64 <config.fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <config.fe.TypeUInt64()> [32]))
- (Mul64 <config.fe.TypeUInt64()>
- (Const64 <config.fe.TypeUInt32()> [int64(umagic(32,c).m)])
+ (Lsh64x64 <fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <fe.TypeUInt64()> [32]))
+ (Mul64 <fe.TypeUInt64()>
+ (Const64 <fe.TypeUInt32()> [int64(umagic(32,c).m)])
(ZeroExt32to64 x)))
- (Const64 <config.fe.TypeUInt64()> [32+umagic(32,c).s-1])))
+ (Const64 <fe.TypeUInt64()> [32+umagic(32,c).s-1])))
// For 64-bit divides on 64-bit machines
// (64-bit divides on 32-bit machines are lowered to a runtime call by the walk pass.)
(Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 && umagic(64,c).m&1 == 0 ->
- (Rsh64Ux64 <config.fe.TypeUInt64()>
- (Hmul64u <config.fe.TypeUInt64()>
- (Const64 <config.fe.TypeUInt64()> [int64(1<<63+umagic(64,c).m/2)])
+ (Rsh64Ux64 <fe.TypeUInt64()>
+ (Hmul64u <fe.TypeUInt64()>
+ (Const64 <fe.TypeUInt64()> [int64(1<<63+umagic(64,c).m/2)])
x)
- (Const64 <config.fe.TypeUInt64()> [umagic(64,c).s-1]))
+ (Const64 <fe.TypeUInt64()> [umagic(64,c).s-1]))
(Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 && c&1 == 0 ->
- (Rsh64Ux64 <config.fe.TypeUInt64()>
- (Hmul64u <config.fe.TypeUInt64()>
- (Const64 <config.fe.TypeUInt64()> [int64(1<<63+(umagic(64,c).m+1)/2)])
- (Rsh64Ux64 <config.fe.TypeUInt64()> x (Const64 <config.fe.TypeUInt64()> [1])))
- (Const64 <config.fe.TypeUInt64()> [umagic(64,c).s-2]))
+ (Rsh64Ux64 <fe.TypeUInt64()>
+ (Hmul64u <fe.TypeUInt64()>
+ (Const64 <fe.TypeUInt64()> [int64(1<<63+(umagic(64,c).m+1)/2)])
+ (Rsh64Ux64 <fe.TypeUInt64()> x (Const64 <fe.TypeUInt64()> [1])))
+ (Const64 <fe.TypeUInt64()> [umagic(64,c).s-2]))
(Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 ->
- (Rsh64Ux64 <config.fe.TypeUInt64()>
+ (Rsh64Ux64 <fe.TypeUInt64()>
(Avg64u
x
- (Hmul64u <config.fe.TypeUInt64()>
- (Const64 <config.fe.TypeUInt64()> [int64(umagic(64,c).m)])
+ (Hmul64u <fe.TypeUInt64()>
+ (Const64 <fe.TypeUInt64()> [int64(umagic(64,c).m)])
x))
- (Const64 <config.fe.TypeUInt64()> [umagic(64,c).s-1]))
+ (Const64 <fe.TypeUInt64()> [umagic(64,c).s-1]))
// Signed divide by a negative constant. Rewrite to divide by a positive constant.
(Div8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 -> (Neg8 (Div8 <t> n (Const8 <t> [-c])))
// Dividing by the most-negative number. Result is always 0 except
// if the input is also the most-negative number.
// We can detect that using the sign bit of x & -x.
-(Div8 <t> x (Const8 [-1<<7 ])) -> (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <config.fe.TypeUInt64()> [7 ]))
-(Div16 <t> x (Const16 [-1<<15])) -> (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <config.fe.TypeUInt64()> [15]))
-(Div32 <t> x (Const32 [-1<<31])) -> (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <config.fe.TypeUInt64()> [31]))
-(Div64 <t> x (Const64 [-1<<63])) -> (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <config.fe.TypeUInt64()> [63]))
+(Div8 <t> x (Const8 [-1<<7 ])) -> (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <fe.TypeUInt64()> [7 ]))
+(Div16 <t> x (Const16 [-1<<15])) -> (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <fe.TypeUInt64()> [15]))
+(Div32 <t> x (Const32 [-1<<31])) -> (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <fe.TypeUInt64()> [31]))
+(Div64 <t> x (Const64 [-1<<63])) -> (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <fe.TypeUInt64()> [63]))
// Signed divide by power of 2.
// n / c = n >> log(c) if n >= 0
// We conditionally add c-1 by adding n>>63>>(64-log(c)) (first shift signed, second shift unsigned).
(Div8 <t> n (Const8 [c])) && isPowerOfTwo(c) ->
(Rsh8x64
- (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <config.fe.TypeUInt64()> [ 7])) (Const64 <config.fe.TypeUInt64()> [ 8-log2(c)])))
- (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <fe.TypeUInt64()> [ 7])) (Const64 <fe.TypeUInt64()> [ 8-log2(c)])))
+ (Const64 <fe.TypeUInt64()> [log2(c)]))
(Div16 <t> n (Const16 [c])) && isPowerOfTwo(c) ->
(Rsh16x64
- (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <config.fe.TypeUInt64()> [15])) (Const64 <config.fe.TypeUInt64()> [16-log2(c)])))
- (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <fe.TypeUInt64()> [15])) (Const64 <fe.TypeUInt64()> [16-log2(c)])))
+ (Const64 <fe.TypeUInt64()> [log2(c)]))
(Div32 <t> n (Const32 [c])) && isPowerOfTwo(c) ->
(Rsh32x64
- (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <config.fe.TypeUInt64()> [31])) (Const64 <config.fe.TypeUInt64()> [32-log2(c)])))
- (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <fe.TypeUInt64()> [31])) (Const64 <fe.TypeUInt64()> [32-log2(c)])))
+ (Const64 <fe.TypeUInt64()> [log2(c)]))
(Div64 <t> n (Const64 [c])) && isPowerOfTwo(c) ->
(Rsh64x64
- (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <config.fe.TypeUInt64()> [63])) (Const64 <config.fe.TypeUInt64()> [64-log2(c)])))
- (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <fe.TypeUInt64()> [63])) (Const64 <fe.TypeUInt64()> [64-log2(c)])))
+ (Const64 <fe.TypeUInt64()> [log2(c)]))
// Signed divide, not a power of 2. Strength reduce to a multiply.
(Div8 <t> x (Const8 [c])) && smagicOK(8,c) ->
(Sub8 <t>
(Rsh32x64 <t>
- (Mul32 <config.fe.TypeUInt32()>
- (Const32 <config.fe.TypeUInt32()> [int64(smagic(8,c).m)])
+ (Mul32 <fe.TypeUInt32()>
+ (Const32 <fe.TypeUInt32()> [int64(smagic(8,c).m)])
(SignExt8to32 x))
- (Const64 <config.fe.TypeUInt64()> [8+smagic(8,c).s]))
+ (Const64 <fe.TypeUInt64()> [8+smagic(8,c).s]))
(Rsh32x64 <t>
(SignExt8to32 x)
- (Const64 <config.fe.TypeUInt64()> [31])))
+ (Const64 <fe.TypeUInt64()> [31])))
(Div16 <t> x (Const16 [c])) && smagicOK(16,c) ->
(Sub16 <t>
(Rsh32x64 <t>
- (Mul32 <config.fe.TypeUInt32()>
- (Const32 <config.fe.TypeUInt32()> [int64(smagic(16,c).m)])
+ (Mul32 <fe.TypeUInt32()>
+ (Const32 <fe.TypeUInt32()> [int64(smagic(16,c).m)])
(SignExt16to32 x))
- (Const64 <config.fe.TypeUInt64()> [16+smagic(16,c).s]))
+ (Const64 <fe.TypeUInt64()> [16+smagic(16,c).s]))
(Rsh32x64 <t>
(SignExt16to32 x)
- (Const64 <config.fe.TypeUInt64()> [31])))
+ (Const64 <fe.TypeUInt64()> [31])))
(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 8 ->
(Sub32 <t>
(Rsh64x64 <t>
- (Mul64 <config.fe.TypeUInt64()>
- (Const64 <config.fe.TypeUInt64()> [int64(smagic(32,c).m)])
+ (Mul64 <fe.TypeUInt64()>
+ (Const64 <fe.TypeUInt64()> [int64(smagic(32,c).m)])
(SignExt32to64 x))
- (Const64 <config.fe.TypeUInt64()> [32+smagic(32,c).s]))
+ (Const64 <fe.TypeUInt64()> [32+smagic(32,c).s]))
(Rsh64x64 <t>
(SignExt32to64 x)
- (Const64 <config.fe.TypeUInt64()> [63])))
+ (Const64 <fe.TypeUInt64()> [63])))
(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 == 0 ->
(Sub32 <t>
(Rsh32x64 <t>
(Hmul32 <t>
- (Const32 <config.fe.TypeUInt32()> [int64(int32(smagic(32,c).m/2))])
+ (Const32 <fe.TypeUInt32()> [int64(int32(smagic(32,c).m/2))])
x)
- (Const64 <config.fe.TypeUInt64()> [smagic(32,c).s-1]))
+ (Const64 <fe.TypeUInt64()> [smagic(32,c).s-1]))
(Rsh32x64 <t>
x
- (Const64 <config.fe.TypeUInt64()> [31])))
+ (Const64 <fe.TypeUInt64()> [31])))
(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 != 0 ->
(Sub32 <t>
(Rsh32x64 <t>
(Add32 <t>
(Hmul32 <t>
- (Const32 <config.fe.TypeUInt32()> [int64(int32(smagic(32,c).m))])
+ (Const32 <fe.TypeUInt32()> [int64(int32(smagic(32,c).m))])
x)
x)
- (Const64 <config.fe.TypeUInt64()> [smagic(32,c).s]))
+ (Const64 <fe.TypeUInt64()> [smagic(32,c).s]))
(Rsh32x64 <t>
x
- (Const64 <config.fe.TypeUInt64()> [31])))
+ (Const64 <fe.TypeUInt64()> [31])))
(Div64 <t> x (Const64 [c])) && smagicOK(64,c) && smagic(64,c).m&1 == 0 ->
(Sub64 <t>
(Rsh64x64 <t>
(Hmul64 <t>
- (Const64 <config.fe.TypeUInt64()> [int64(smagic(64,c).m/2)])
+ (Const64 <fe.TypeUInt64()> [int64(smagic(64,c).m/2)])
x)
- (Const64 <config.fe.TypeUInt64()> [smagic(64,c).s-1]))
+ (Const64 <fe.TypeUInt64()> [smagic(64,c).s-1]))
(Rsh64x64 <t>
x
- (Const64 <config.fe.TypeUInt64()> [63])))
+ (Const64 <fe.TypeUInt64()> [63])))
(Div64 <t> x (Const64 [c])) && smagicOK(64,c) && smagic(64,c).m&1 != 0 ->
(Sub64 <t>
(Rsh64x64 <t>
(Add64 <t>
(Hmul64 <t>
- (Const64 <config.fe.TypeUInt64()> [int64(smagic(64,c).m)])
+ (Const64 <fe.TypeUInt64()> [int64(smagic(64,c).m)])
x)
x)
- (Const64 <config.fe.TypeUInt64()> [smagic(64,c).s]))
+ (Const64 <fe.TypeUInt64()> [smagic(64,c).s]))
(Rsh64x64 <t>
x
- (Const64 <config.fe.TypeUInt64()> [63])))
+ (Const64 <fe.TypeUInt64()> [63])))
// Unsigned mod by power of 2 constant.
(Mod8u <t> n (Const8 [c])) && isPowerOfTwo(c&0xff) -> (And8 n (Const8 <t> [(c&0xff)-1]))
&& mem.Op == OpStaticCall
&& isSameSym(mem.Aux, "runtime.newobject")
&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
- && warnRule(config.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
+ && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
-> (Invalid)
(NilCheck (OffPtr (Load (OffPtr [c] (SP)) mem)) mem)
&& mem.Op == OpStaticCall
&& isSameSym(mem.Aux, "runtime.newobject")
&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
- && warnRule(config.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
+ && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
-> (Invalid)
// De-virtualize interface calls into static calls.
fmt.Fprintln(w, "var _ = math.MinInt8 // in case not otherwise used")
// Main rewrite routine is a switch on v.Op.
- fmt.Fprintf(w, "func rewriteValue%s(v *Value, config *Config) bool {\n", arch.name)
+ fmt.Fprintf(w, "func rewriteValue%s(v *Value) bool {\n", arch.name)
fmt.Fprintf(w, "switch v.Op {\n")
for _, op := range ops {
fmt.Fprintf(w, "case %s:\n", op)
- fmt.Fprintf(w, "return rewriteValue%s_%s(v, config)\n", arch.name, op)
+ fmt.Fprintf(w, "return rewriteValue%s_%s(v)\n", arch.name, op)
}
fmt.Fprintf(w, "}\n")
fmt.Fprintf(w, "return false\n")
// Generate a routine per op. Note that we don't make one giant routine
// because it is too big for some compilers.
for _, op := range ops {
- fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, op)
- fmt.Fprintln(w, "b := v.Block")
- fmt.Fprintln(w, "_ = b")
+ buf := new(bytes.Buffer)
var canFail bool
for i, rule := range oprules[op] {
match, cond, result := rule.parse()
- fmt.Fprintf(w, "// match: %s\n", match)
- fmt.Fprintf(w, "// cond: %s\n", cond)
- fmt.Fprintf(w, "// result: %s\n", result)
+ fmt.Fprintf(buf, "// match: %s\n", match)
+ fmt.Fprintf(buf, "// cond: %s\n", cond)
+ fmt.Fprintf(buf, "// result: %s\n", result)
canFail = false
- fmt.Fprintf(w, "for {\n")
- if genMatch(w, arch, match, rule.loc) {
+ fmt.Fprintf(buf, "for {\n")
+ if genMatch(buf, arch, match, rule.loc) {
canFail = true
}
if cond != "" {
- fmt.Fprintf(w, "if !(%s) {\nbreak\n}\n", cond)
+ fmt.Fprintf(buf, "if !(%s) {\nbreak\n}\n", cond)
canFail = true
}
if !canFail && i != len(oprules[op])-1 {
log.Fatalf("unconditional rule %s is followed by other rules", match)
}
- genResult(w, arch, result, rule.loc)
+ genResult(buf, arch, result, rule.loc)
if *genLog {
- fmt.Fprintf(w, "logRule(\"%s\")\n", rule.loc)
+ fmt.Fprintf(buf, "logRule(\"%s\")\n", rule.loc)
}
- fmt.Fprintf(w, "return true\n")
+ fmt.Fprintf(buf, "return true\n")
- fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(buf, "}\n")
}
if canFail {
- fmt.Fprintf(w, "return false\n")
- }
+ fmt.Fprintf(buf, "return false\n")
+ }
+
+ body := buf.String()
+ // Do a rough match to predict whether we need b, config, and/or fe.
+ // It's not precise--thus the blank assignments--but it's good enough
+ // to avoid generating needless code and doing pointless nil checks.
+ hasb := strings.Contains(body, "b.")
+ hasconfig := strings.Contains(body, "config.")
+ hasfe := strings.Contains(body, "fe.")
+ fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value) bool {\n", arch.name, op)
+ if hasb || hasconfig || hasfe {
+ fmt.Fprintln(w, "b := v.Block")
+ fmt.Fprintln(w, "_ = b")
+ }
+ if hasconfig || hasfe {
+ fmt.Fprintln(w, "config := b.Func.Config")
+ fmt.Fprintln(w, "_ = config")
+ }
+ if hasfe {
+ fmt.Fprintln(w, "fe := config.fe")
+ fmt.Fprintln(w, "_ = fe")
+ }
+ fmt.Fprint(w, body)
fmt.Fprintf(w, "}\n")
}
// Generate block rewrite function. There are only a few block types
// so we can make this one function with a switch.
- fmt.Fprintf(w, "func rewriteBlock%s(b *Block, config *Config) bool {\n", arch.name)
+ fmt.Fprintf(w, "func rewriteBlock%s(b *Block) bool {\n", arch.name)
+ fmt.Fprintln(w, "config := b.Func.Config")
+ fmt.Fprintln(w, "_ = config")
+ fmt.Fprintln(w, "fe := config.fe")
+ fmt.Fprintln(w, "_ = fe")
fmt.Fprintf(w, "switch b.Kind {\n")
ops = nil
for op := range blockrules {
case "Flags", "Mem", "Void", "Int128":
return "Type" + typ
default:
- return "config.fe.Type" + typ + "()"
+ return "fe.Type" + typ + "()"
}
}
"strings"
)
-func applyRewrite(f *Func, rb func(*Block, *Config) bool, rv func(*Value, *Config) bool) {
+func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter) {
// repeat rewrites until we find no more rewrites
var curb *Block
var curv *Value
// TODO(khr): print source location also
}
}()
- config := f.Config
for {
change := false
for _, b := range f.Blocks {
}
}
curb = b
- if rb(b, config) {
+ if rb(b) {
change = true
}
curb = nil
// apply rewrite function
curv = v
- if rv(v, config) {
+ if rv(v) {
change = true
}
curv = nil
import "math"
var _ = math.MinInt8 // in case not otherwise used
-func rewriteValue386(v *Value, config *Config) bool {
+func rewriteValue386(v *Value) bool {
switch v.Op {
case Op386ADCL:
- return rewriteValue386_Op386ADCL(v, config)
+ return rewriteValue386_Op386ADCL(v)
case Op386ADDL:
- return rewriteValue386_Op386ADDL(v, config)
+ return rewriteValue386_Op386ADDL(v)
case Op386ADDLcarry:
- return rewriteValue386_Op386ADDLcarry(v, config)
+ return rewriteValue386_Op386ADDLcarry(v)
case Op386ADDLconst:
- return rewriteValue386_Op386ADDLconst(v, config)
+ return rewriteValue386_Op386ADDLconst(v)
case Op386ANDL:
- return rewriteValue386_Op386ANDL(v, config)
+ return rewriteValue386_Op386ANDL(v)
case Op386ANDLconst:
- return rewriteValue386_Op386ANDLconst(v, config)
+ return rewriteValue386_Op386ANDLconst(v)
case Op386CMPB:
- return rewriteValue386_Op386CMPB(v, config)
+ return rewriteValue386_Op386CMPB(v)
case Op386CMPBconst:
- return rewriteValue386_Op386CMPBconst(v, config)
+ return rewriteValue386_Op386CMPBconst(v)
case Op386CMPL:
- return rewriteValue386_Op386CMPL(v, config)
+ return rewriteValue386_Op386CMPL(v)
case Op386CMPLconst:
- return rewriteValue386_Op386CMPLconst(v, config)
+ return rewriteValue386_Op386CMPLconst(v)
case Op386CMPW:
- return rewriteValue386_Op386CMPW(v, config)
+ return rewriteValue386_Op386CMPW(v)
case Op386CMPWconst:
- return rewriteValue386_Op386CMPWconst(v, config)
+ return rewriteValue386_Op386CMPWconst(v)
case Op386LEAL:
- return rewriteValue386_Op386LEAL(v, config)
+ return rewriteValue386_Op386LEAL(v)
case Op386LEAL1:
- return rewriteValue386_Op386LEAL1(v, config)
+ return rewriteValue386_Op386LEAL1(v)
case Op386LEAL2:
- return rewriteValue386_Op386LEAL2(v, config)
+ return rewriteValue386_Op386LEAL2(v)
case Op386LEAL4:
- return rewriteValue386_Op386LEAL4(v, config)
+ return rewriteValue386_Op386LEAL4(v)
case Op386LEAL8:
- return rewriteValue386_Op386LEAL8(v, config)
+ return rewriteValue386_Op386LEAL8(v)
case Op386MOVBLSX:
- return rewriteValue386_Op386MOVBLSX(v, config)
+ return rewriteValue386_Op386MOVBLSX(v)
case Op386MOVBLSXload:
- return rewriteValue386_Op386MOVBLSXload(v, config)
+ return rewriteValue386_Op386MOVBLSXload(v)
case Op386MOVBLZX:
- return rewriteValue386_Op386MOVBLZX(v, config)
+ return rewriteValue386_Op386MOVBLZX(v)
case Op386MOVBload:
- return rewriteValue386_Op386MOVBload(v, config)
+ return rewriteValue386_Op386MOVBload(v)
case Op386MOVBloadidx1:
- return rewriteValue386_Op386MOVBloadidx1(v, config)
+ return rewriteValue386_Op386MOVBloadidx1(v)
case Op386MOVBstore:
- return rewriteValue386_Op386MOVBstore(v, config)
+ return rewriteValue386_Op386MOVBstore(v)
case Op386MOVBstoreconst:
- return rewriteValue386_Op386MOVBstoreconst(v, config)
+ return rewriteValue386_Op386MOVBstoreconst(v)
case Op386MOVBstoreconstidx1:
- return rewriteValue386_Op386MOVBstoreconstidx1(v, config)
+ return rewriteValue386_Op386MOVBstoreconstidx1(v)
case Op386MOVBstoreidx1:
- return rewriteValue386_Op386MOVBstoreidx1(v, config)
+ return rewriteValue386_Op386MOVBstoreidx1(v)
case Op386MOVLload:
- return rewriteValue386_Op386MOVLload(v, config)
+ return rewriteValue386_Op386MOVLload(v)
case Op386MOVLloadidx1:
- return rewriteValue386_Op386MOVLloadidx1(v, config)
+ return rewriteValue386_Op386MOVLloadidx1(v)
case Op386MOVLloadidx4:
- return rewriteValue386_Op386MOVLloadidx4(v, config)
+ return rewriteValue386_Op386MOVLloadidx4(v)
case Op386MOVLstore:
- return rewriteValue386_Op386MOVLstore(v, config)
+ return rewriteValue386_Op386MOVLstore(v)
case Op386MOVLstoreconst:
- return rewriteValue386_Op386MOVLstoreconst(v, config)
+ return rewriteValue386_Op386MOVLstoreconst(v)
case Op386MOVLstoreconstidx1:
- return rewriteValue386_Op386MOVLstoreconstidx1(v, config)
+ return rewriteValue386_Op386MOVLstoreconstidx1(v)
case Op386MOVLstoreconstidx4:
- return rewriteValue386_Op386MOVLstoreconstidx4(v, config)
+ return rewriteValue386_Op386MOVLstoreconstidx4(v)
case Op386MOVLstoreidx1:
- return rewriteValue386_Op386MOVLstoreidx1(v, config)
+ return rewriteValue386_Op386MOVLstoreidx1(v)
case Op386MOVLstoreidx4:
- return rewriteValue386_Op386MOVLstoreidx4(v, config)
+ return rewriteValue386_Op386MOVLstoreidx4(v)
case Op386MOVSDconst:
- return rewriteValue386_Op386MOVSDconst(v, config)
+ return rewriteValue386_Op386MOVSDconst(v)
case Op386MOVSDload:
- return rewriteValue386_Op386MOVSDload(v, config)
+ return rewriteValue386_Op386MOVSDload(v)
case Op386MOVSDloadidx1:
- return rewriteValue386_Op386MOVSDloadidx1(v, config)
+ return rewriteValue386_Op386MOVSDloadidx1(v)
case Op386MOVSDloadidx8:
- return rewriteValue386_Op386MOVSDloadidx8(v, config)
+ return rewriteValue386_Op386MOVSDloadidx8(v)
case Op386MOVSDstore:
- return rewriteValue386_Op386MOVSDstore(v, config)
+ return rewriteValue386_Op386MOVSDstore(v)
case Op386MOVSDstoreidx1:
- return rewriteValue386_Op386MOVSDstoreidx1(v, config)
+ return rewriteValue386_Op386MOVSDstoreidx1(v)
case Op386MOVSDstoreidx8:
- return rewriteValue386_Op386MOVSDstoreidx8(v, config)
+ return rewriteValue386_Op386MOVSDstoreidx8(v)
case Op386MOVSSconst:
- return rewriteValue386_Op386MOVSSconst(v, config)
+ return rewriteValue386_Op386MOVSSconst(v)
case Op386MOVSSload:
- return rewriteValue386_Op386MOVSSload(v, config)
+ return rewriteValue386_Op386MOVSSload(v)
case Op386MOVSSloadidx1:
- return rewriteValue386_Op386MOVSSloadidx1(v, config)
+ return rewriteValue386_Op386MOVSSloadidx1(v)
case Op386MOVSSloadidx4:
- return rewriteValue386_Op386MOVSSloadidx4(v, config)
+ return rewriteValue386_Op386MOVSSloadidx4(v)
case Op386MOVSSstore:
- return rewriteValue386_Op386MOVSSstore(v, config)
+ return rewriteValue386_Op386MOVSSstore(v)
case Op386MOVSSstoreidx1:
- return rewriteValue386_Op386MOVSSstoreidx1(v, config)
+ return rewriteValue386_Op386MOVSSstoreidx1(v)
case Op386MOVSSstoreidx4:
- return rewriteValue386_Op386MOVSSstoreidx4(v, config)
+ return rewriteValue386_Op386MOVSSstoreidx4(v)
case Op386MOVWLSX:
- return rewriteValue386_Op386MOVWLSX(v, config)
+ return rewriteValue386_Op386MOVWLSX(v)
case Op386MOVWLSXload:
- return rewriteValue386_Op386MOVWLSXload(v, config)
+ return rewriteValue386_Op386MOVWLSXload(v)
case Op386MOVWLZX:
- return rewriteValue386_Op386MOVWLZX(v, config)
+ return rewriteValue386_Op386MOVWLZX(v)
case Op386MOVWload:
- return rewriteValue386_Op386MOVWload(v, config)
+ return rewriteValue386_Op386MOVWload(v)
case Op386MOVWloadidx1:
- return rewriteValue386_Op386MOVWloadidx1(v, config)
+ return rewriteValue386_Op386MOVWloadidx1(v)
case Op386MOVWloadidx2:
- return rewriteValue386_Op386MOVWloadidx2(v, config)
+ return rewriteValue386_Op386MOVWloadidx2(v)
case Op386MOVWstore:
- return rewriteValue386_Op386MOVWstore(v, config)
+ return rewriteValue386_Op386MOVWstore(v)
case Op386MOVWstoreconst:
- return rewriteValue386_Op386MOVWstoreconst(v, config)
+ return rewriteValue386_Op386MOVWstoreconst(v)
case Op386MOVWstoreconstidx1:
- return rewriteValue386_Op386MOVWstoreconstidx1(v, config)
+ return rewriteValue386_Op386MOVWstoreconstidx1(v)
case Op386MOVWstoreconstidx2:
- return rewriteValue386_Op386MOVWstoreconstidx2(v, config)
+ return rewriteValue386_Op386MOVWstoreconstidx2(v)
case Op386MOVWstoreidx1:
- return rewriteValue386_Op386MOVWstoreidx1(v, config)
+ return rewriteValue386_Op386MOVWstoreidx1(v)
case Op386MOVWstoreidx2:
- return rewriteValue386_Op386MOVWstoreidx2(v, config)
+ return rewriteValue386_Op386MOVWstoreidx2(v)
case Op386MULL:
- return rewriteValue386_Op386MULL(v, config)
+ return rewriteValue386_Op386MULL(v)
case Op386MULLconst:
- return rewriteValue386_Op386MULLconst(v, config)
+ return rewriteValue386_Op386MULLconst(v)
case Op386NEGL:
- return rewriteValue386_Op386NEGL(v, config)
+ return rewriteValue386_Op386NEGL(v)
case Op386NOTL:
- return rewriteValue386_Op386NOTL(v, config)
+ return rewriteValue386_Op386NOTL(v)
case Op386ORL:
- return rewriteValue386_Op386ORL(v, config)
+ return rewriteValue386_Op386ORL(v)
case Op386ORLconst:
- return rewriteValue386_Op386ORLconst(v, config)
+ return rewriteValue386_Op386ORLconst(v)
case Op386ROLBconst:
- return rewriteValue386_Op386ROLBconst(v, config)
+ return rewriteValue386_Op386ROLBconst(v)
case Op386ROLLconst:
- return rewriteValue386_Op386ROLLconst(v, config)
+ return rewriteValue386_Op386ROLLconst(v)
case Op386ROLWconst:
- return rewriteValue386_Op386ROLWconst(v, config)
+ return rewriteValue386_Op386ROLWconst(v)
case Op386SARB:
- return rewriteValue386_Op386SARB(v, config)
+ return rewriteValue386_Op386SARB(v)
case Op386SARBconst:
- return rewriteValue386_Op386SARBconst(v, config)
+ return rewriteValue386_Op386SARBconst(v)
case Op386SARL:
- return rewriteValue386_Op386SARL(v, config)
+ return rewriteValue386_Op386SARL(v)
case Op386SARLconst:
- return rewriteValue386_Op386SARLconst(v, config)
+ return rewriteValue386_Op386SARLconst(v)
case Op386SARW:
- return rewriteValue386_Op386SARW(v, config)
+ return rewriteValue386_Op386SARW(v)
case Op386SARWconst:
- return rewriteValue386_Op386SARWconst(v, config)
+ return rewriteValue386_Op386SARWconst(v)
case Op386SBBL:
- return rewriteValue386_Op386SBBL(v, config)
+ return rewriteValue386_Op386SBBL(v)
case Op386SBBLcarrymask:
- return rewriteValue386_Op386SBBLcarrymask(v, config)
+ return rewriteValue386_Op386SBBLcarrymask(v)
case Op386SETA:
- return rewriteValue386_Op386SETA(v, config)
+ return rewriteValue386_Op386SETA(v)
case Op386SETAE:
- return rewriteValue386_Op386SETAE(v, config)
+ return rewriteValue386_Op386SETAE(v)
case Op386SETB:
- return rewriteValue386_Op386SETB(v, config)
+ return rewriteValue386_Op386SETB(v)
case Op386SETBE:
- return rewriteValue386_Op386SETBE(v, config)
+ return rewriteValue386_Op386SETBE(v)
case Op386SETEQ:
- return rewriteValue386_Op386SETEQ(v, config)
+ return rewriteValue386_Op386SETEQ(v)
case Op386SETG:
- return rewriteValue386_Op386SETG(v, config)
+ return rewriteValue386_Op386SETG(v)
case Op386SETGE:
- return rewriteValue386_Op386SETGE(v, config)
+ return rewriteValue386_Op386SETGE(v)
case Op386SETL:
- return rewriteValue386_Op386SETL(v, config)
+ return rewriteValue386_Op386SETL(v)
case Op386SETLE:
- return rewriteValue386_Op386SETLE(v, config)
+ return rewriteValue386_Op386SETLE(v)
case Op386SETNE:
- return rewriteValue386_Op386SETNE(v, config)
+ return rewriteValue386_Op386SETNE(v)
case Op386SHLL:
- return rewriteValue386_Op386SHLL(v, config)
+ return rewriteValue386_Op386SHLL(v)
case Op386SHLLconst:
- return rewriteValue386_Op386SHLLconst(v, config)
+ return rewriteValue386_Op386SHLLconst(v)
case Op386SHRB:
- return rewriteValue386_Op386SHRB(v, config)
+ return rewriteValue386_Op386SHRB(v)
case Op386SHRBconst:
- return rewriteValue386_Op386SHRBconst(v, config)
+ return rewriteValue386_Op386SHRBconst(v)
case Op386SHRL:
- return rewriteValue386_Op386SHRL(v, config)
+ return rewriteValue386_Op386SHRL(v)
case Op386SHRLconst:
- return rewriteValue386_Op386SHRLconst(v, config)
+ return rewriteValue386_Op386SHRLconst(v)
case Op386SHRW:
- return rewriteValue386_Op386SHRW(v, config)
+ return rewriteValue386_Op386SHRW(v)
case Op386SHRWconst:
- return rewriteValue386_Op386SHRWconst(v, config)
+ return rewriteValue386_Op386SHRWconst(v)
case Op386SUBL:
- return rewriteValue386_Op386SUBL(v, config)
+ return rewriteValue386_Op386SUBL(v)
case Op386SUBLcarry:
- return rewriteValue386_Op386SUBLcarry(v, config)
+ return rewriteValue386_Op386SUBLcarry(v)
case Op386SUBLconst:
- return rewriteValue386_Op386SUBLconst(v, config)
+ return rewriteValue386_Op386SUBLconst(v)
case Op386XORL:
- return rewriteValue386_Op386XORL(v, config)
+ return rewriteValue386_Op386XORL(v)
case Op386XORLconst:
- return rewriteValue386_Op386XORLconst(v, config)
+ return rewriteValue386_Op386XORLconst(v)
case OpAdd16:
- return rewriteValue386_OpAdd16(v, config)
+ return rewriteValue386_OpAdd16(v)
case OpAdd32:
- return rewriteValue386_OpAdd32(v, config)
+ return rewriteValue386_OpAdd32(v)
case OpAdd32F:
- return rewriteValue386_OpAdd32F(v, config)
+ return rewriteValue386_OpAdd32F(v)
case OpAdd32carry:
- return rewriteValue386_OpAdd32carry(v, config)
+ return rewriteValue386_OpAdd32carry(v)
case OpAdd32withcarry:
- return rewriteValue386_OpAdd32withcarry(v, config)
+ return rewriteValue386_OpAdd32withcarry(v)
case OpAdd64F:
- return rewriteValue386_OpAdd64F(v, config)
+ return rewriteValue386_OpAdd64F(v)
case OpAdd8:
- return rewriteValue386_OpAdd8(v, config)
+ return rewriteValue386_OpAdd8(v)
case OpAddPtr:
- return rewriteValue386_OpAddPtr(v, config)
+ return rewriteValue386_OpAddPtr(v)
case OpAddr:
- return rewriteValue386_OpAddr(v, config)
+ return rewriteValue386_OpAddr(v)
case OpAnd16:
- return rewriteValue386_OpAnd16(v, config)
+ return rewriteValue386_OpAnd16(v)
case OpAnd32:
- return rewriteValue386_OpAnd32(v, config)
+ return rewriteValue386_OpAnd32(v)
case OpAnd8:
- return rewriteValue386_OpAnd8(v, config)
+ return rewriteValue386_OpAnd8(v)
case OpAndB:
- return rewriteValue386_OpAndB(v, config)
+ return rewriteValue386_OpAndB(v)
case OpAvg32u:
- return rewriteValue386_OpAvg32u(v, config)
+ return rewriteValue386_OpAvg32u(v)
case OpBswap32:
- return rewriteValue386_OpBswap32(v, config)
+ return rewriteValue386_OpBswap32(v)
case OpClosureCall:
- return rewriteValue386_OpClosureCall(v, config)
+ return rewriteValue386_OpClosureCall(v)
case OpCom16:
- return rewriteValue386_OpCom16(v, config)
+ return rewriteValue386_OpCom16(v)
case OpCom32:
- return rewriteValue386_OpCom32(v, config)
+ return rewriteValue386_OpCom32(v)
case OpCom8:
- return rewriteValue386_OpCom8(v, config)
+ return rewriteValue386_OpCom8(v)
case OpConst16:
- return rewriteValue386_OpConst16(v, config)
+ return rewriteValue386_OpConst16(v)
case OpConst32:
- return rewriteValue386_OpConst32(v, config)
+ return rewriteValue386_OpConst32(v)
case OpConst32F:
- return rewriteValue386_OpConst32F(v, config)
+ return rewriteValue386_OpConst32F(v)
case OpConst64F:
- return rewriteValue386_OpConst64F(v, config)
+ return rewriteValue386_OpConst64F(v)
case OpConst8:
- return rewriteValue386_OpConst8(v, config)
+ return rewriteValue386_OpConst8(v)
case OpConstBool:
- return rewriteValue386_OpConstBool(v, config)
+ return rewriteValue386_OpConstBool(v)
case OpConstNil:
- return rewriteValue386_OpConstNil(v, config)
+ return rewriteValue386_OpConstNil(v)
case OpConvert:
- return rewriteValue386_OpConvert(v, config)
+ return rewriteValue386_OpConvert(v)
case OpCvt32Fto32:
- return rewriteValue386_OpCvt32Fto32(v, config)
+ return rewriteValue386_OpCvt32Fto32(v)
case OpCvt32Fto64F:
- return rewriteValue386_OpCvt32Fto64F(v, config)
+ return rewriteValue386_OpCvt32Fto64F(v)
case OpCvt32to32F:
- return rewriteValue386_OpCvt32to32F(v, config)
+ return rewriteValue386_OpCvt32to32F(v)
case OpCvt32to64F:
- return rewriteValue386_OpCvt32to64F(v, config)
+ return rewriteValue386_OpCvt32to64F(v)
case OpCvt64Fto32:
- return rewriteValue386_OpCvt64Fto32(v, config)
+ return rewriteValue386_OpCvt64Fto32(v)
case OpCvt64Fto32F:
- return rewriteValue386_OpCvt64Fto32F(v, config)
+ return rewriteValue386_OpCvt64Fto32F(v)
case OpDiv16:
- return rewriteValue386_OpDiv16(v, config)
+ return rewriteValue386_OpDiv16(v)
case OpDiv16u:
- return rewriteValue386_OpDiv16u(v, config)
+ return rewriteValue386_OpDiv16u(v)
case OpDiv32:
- return rewriteValue386_OpDiv32(v, config)
+ return rewriteValue386_OpDiv32(v)
case OpDiv32F:
- return rewriteValue386_OpDiv32F(v, config)
+ return rewriteValue386_OpDiv32F(v)
case OpDiv32u:
- return rewriteValue386_OpDiv32u(v, config)
+ return rewriteValue386_OpDiv32u(v)
case OpDiv64F:
- return rewriteValue386_OpDiv64F(v, config)
+ return rewriteValue386_OpDiv64F(v)
case OpDiv8:
- return rewriteValue386_OpDiv8(v, config)
+ return rewriteValue386_OpDiv8(v)
case OpDiv8u:
- return rewriteValue386_OpDiv8u(v, config)
+ return rewriteValue386_OpDiv8u(v)
case OpEq16:
- return rewriteValue386_OpEq16(v, config)
+ return rewriteValue386_OpEq16(v)
case OpEq32:
- return rewriteValue386_OpEq32(v, config)
+ return rewriteValue386_OpEq32(v)
case OpEq32F:
- return rewriteValue386_OpEq32F(v, config)
+ return rewriteValue386_OpEq32F(v)
case OpEq64F:
- return rewriteValue386_OpEq64F(v, config)
+ return rewriteValue386_OpEq64F(v)
case OpEq8:
- return rewriteValue386_OpEq8(v, config)
+ return rewriteValue386_OpEq8(v)
case OpEqB:
- return rewriteValue386_OpEqB(v, config)
+ return rewriteValue386_OpEqB(v)
case OpEqPtr:
- return rewriteValue386_OpEqPtr(v, config)
+ return rewriteValue386_OpEqPtr(v)
case OpGeq16:
- return rewriteValue386_OpGeq16(v, config)
+ return rewriteValue386_OpGeq16(v)
case OpGeq16U:
- return rewriteValue386_OpGeq16U(v, config)
+ return rewriteValue386_OpGeq16U(v)
case OpGeq32:
- return rewriteValue386_OpGeq32(v, config)
+ return rewriteValue386_OpGeq32(v)
case OpGeq32F:
- return rewriteValue386_OpGeq32F(v, config)
+ return rewriteValue386_OpGeq32F(v)
case OpGeq32U:
- return rewriteValue386_OpGeq32U(v, config)
+ return rewriteValue386_OpGeq32U(v)
case OpGeq64F:
- return rewriteValue386_OpGeq64F(v, config)
+ return rewriteValue386_OpGeq64F(v)
case OpGeq8:
- return rewriteValue386_OpGeq8(v, config)
+ return rewriteValue386_OpGeq8(v)
case OpGeq8U:
- return rewriteValue386_OpGeq8U(v, config)
+ return rewriteValue386_OpGeq8U(v)
case OpGetClosurePtr:
- return rewriteValue386_OpGetClosurePtr(v, config)
+ return rewriteValue386_OpGetClosurePtr(v)
case OpGetG:
- return rewriteValue386_OpGetG(v, config)
+ return rewriteValue386_OpGetG(v)
case OpGreater16:
- return rewriteValue386_OpGreater16(v, config)
+ return rewriteValue386_OpGreater16(v)
case OpGreater16U:
- return rewriteValue386_OpGreater16U(v, config)
+ return rewriteValue386_OpGreater16U(v)
case OpGreater32:
- return rewriteValue386_OpGreater32(v, config)
+ return rewriteValue386_OpGreater32(v)
case OpGreater32F:
- return rewriteValue386_OpGreater32F(v, config)
+ return rewriteValue386_OpGreater32F(v)
case OpGreater32U:
- return rewriteValue386_OpGreater32U(v, config)
+ return rewriteValue386_OpGreater32U(v)
case OpGreater64F:
- return rewriteValue386_OpGreater64F(v, config)
+ return rewriteValue386_OpGreater64F(v)
case OpGreater8:
- return rewriteValue386_OpGreater8(v, config)
+ return rewriteValue386_OpGreater8(v)
case OpGreater8U:
- return rewriteValue386_OpGreater8U(v, config)
+ return rewriteValue386_OpGreater8U(v)
case OpHmul32:
- return rewriteValue386_OpHmul32(v, config)
+ return rewriteValue386_OpHmul32(v)
case OpHmul32u:
- return rewriteValue386_OpHmul32u(v, config)
+ return rewriteValue386_OpHmul32u(v)
case OpInterCall:
- return rewriteValue386_OpInterCall(v, config)
+ return rewriteValue386_OpInterCall(v)
case OpIsInBounds:
- return rewriteValue386_OpIsInBounds(v, config)
+ return rewriteValue386_OpIsInBounds(v)
case OpIsNonNil:
- return rewriteValue386_OpIsNonNil(v, config)
+ return rewriteValue386_OpIsNonNil(v)
case OpIsSliceInBounds:
- return rewriteValue386_OpIsSliceInBounds(v, config)
+ return rewriteValue386_OpIsSliceInBounds(v)
case OpLeq16:
- return rewriteValue386_OpLeq16(v, config)
+ return rewriteValue386_OpLeq16(v)
case OpLeq16U:
- return rewriteValue386_OpLeq16U(v, config)
+ return rewriteValue386_OpLeq16U(v)
case OpLeq32:
- return rewriteValue386_OpLeq32(v, config)
+ return rewriteValue386_OpLeq32(v)
case OpLeq32F:
- return rewriteValue386_OpLeq32F(v, config)
+ return rewriteValue386_OpLeq32F(v)
case OpLeq32U:
- return rewriteValue386_OpLeq32U(v, config)
+ return rewriteValue386_OpLeq32U(v)
case OpLeq64F:
- return rewriteValue386_OpLeq64F(v, config)
+ return rewriteValue386_OpLeq64F(v)
case OpLeq8:
- return rewriteValue386_OpLeq8(v, config)
+ return rewriteValue386_OpLeq8(v)
case OpLeq8U:
- return rewriteValue386_OpLeq8U(v, config)
+ return rewriteValue386_OpLeq8U(v)
case OpLess16:
- return rewriteValue386_OpLess16(v, config)
+ return rewriteValue386_OpLess16(v)
case OpLess16U:
- return rewriteValue386_OpLess16U(v, config)
+ return rewriteValue386_OpLess16U(v)
case OpLess32:
- return rewriteValue386_OpLess32(v, config)
+ return rewriteValue386_OpLess32(v)
case OpLess32F:
- return rewriteValue386_OpLess32F(v, config)
+ return rewriteValue386_OpLess32F(v)
case OpLess32U:
- return rewriteValue386_OpLess32U(v, config)
+ return rewriteValue386_OpLess32U(v)
case OpLess64F:
- return rewriteValue386_OpLess64F(v, config)
+ return rewriteValue386_OpLess64F(v)
case OpLess8:
- return rewriteValue386_OpLess8(v, config)
+ return rewriteValue386_OpLess8(v)
case OpLess8U:
- return rewriteValue386_OpLess8U(v, config)
+ return rewriteValue386_OpLess8U(v)
case OpLoad:
- return rewriteValue386_OpLoad(v, config)
+ return rewriteValue386_OpLoad(v)
case OpLsh16x16:
- return rewriteValue386_OpLsh16x16(v, config)
+ return rewriteValue386_OpLsh16x16(v)
case OpLsh16x32:
- return rewriteValue386_OpLsh16x32(v, config)
+ return rewriteValue386_OpLsh16x32(v)
case OpLsh16x64:
- return rewriteValue386_OpLsh16x64(v, config)
+ return rewriteValue386_OpLsh16x64(v)
case OpLsh16x8:
- return rewriteValue386_OpLsh16x8(v, config)
+ return rewriteValue386_OpLsh16x8(v)
case OpLsh32x16:
- return rewriteValue386_OpLsh32x16(v, config)
+ return rewriteValue386_OpLsh32x16(v)
case OpLsh32x32:
- return rewriteValue386_OpLsh32x32(v, config)
+ return rewriteValue386_OpLsh32x32(v)
case OpLsh32x64:
- return rewriteValue386_OpLsh32x64(v, config)
+ return rewriteValue386_OpLsh32x64(v)
case OpLsh32x8:
- return rewriteValue386_OpLsh32x8(v, config)
+ return rewriteValue386_OpLsh32x8(v)
case OpLsh8x16:
- return rewriteValue386_OpLsh8x16(v, config)
+ return rewriteValue386_OpLsh8x16(v)
case OpLsh8x32:
- return rewriteValue386_OpLsh8x32(v, config)
+ return rewriteValue386_OpLsh8x32(v)
case OpLsh8x64:
- return rewriteValue386_OpLsh8x64(v, config)
+ return rewriteValue386_OpLsh8x64(v)
case OpLsh8x8:
- return rewriteValue386_OpLsh8x8(v, config)
+ return rewriteValue386_OpLsh8x8(v)
case OpMod16:
- return rewriteValue386_OpMod16(v, config)
+ return rewriteValue386_OpMod16(v)
case OpMod16u:
- return rewriteValue386_OpMod16u(v, config)
+ return rewriteValue386_OpMod16u(v)
case OpMod32:
- return rewriteValue386_OpMod32(v, config)
+ return rewriteValue386_OpMod32(v)
case OpMod32u:
- return rewriteValue386_OpMod32u(v, config)
+ return rewriteValue386_OpMod32u(v)
case OpMod8:
- return rewriteValue386_OpMod8(v, config)
+ return rewriteValue386_OpMod8(v)
case OpMod8u:
- return rewriteValue386_OpMod8u(v, config)
+ return rewriteValue386_OpMod8u(v)
case OpMove:
- return rewriteValue386_OpMove(v, config)
+ return rewriteValue386_OpMove(v)
case OpMul16:
- return rewriteValue386_OpMul16(v, config)
+ return rewriteValue386_OpMul16(v)
case OpMul32:
- return rewriteValue386_OpMul32(v, config)
+ return rewriteValue386_OpMul32(v)
case OpMul32F:
- return rewriteValue386_OpMul32F(v, config)
+ return rewriteValue386_OpMul32F(v)
case OpMul32uhilo:
- return rewriteValue386_OpMul32uhilo(v, config)
+ return rewriteValue386_OpMul32uhilo(v)
case OpMul64F:
- return rewriteValue386_OpMul64F(v, config)
+ return rewriteValue386_OpMul64F(v)
case OpMul8:
- return rewriteValue386_OpMul8(v, config)
+ return rewriteValue386_OpMul8(v)
case OpNeg16:
- return rewriteValue386_OpNeg16(v, config)
+ return rewriteValue386_OpNeg16(v)
case OpNeg32:
- return rewriteValue386_OpNeg32(v, config)
+ return rewriteValue386_OpNeg32(v)
case OpNeg32F:
- return rewriteValue386_OpNeg32F(v, config)
+ return rewriteValue386_OpNeg32F(v)
case OpNeg64F:
- return rewriteValue386_OpNeg64F(v, config)
+ return rewriteValue386_OpNeg64F(v)
case OpNeg8:
- return rewriteValue386_OpNeg8(v, config)
+ return rewriteValue386_OpNeg8(v)
case OpNeq16:
- return rewriteValue386_OpNeq16(v, config)
+ return rewriteValue386_OpNeq16(v)
case OpNeq32:
- return rewriteValue386_OpNeq32(v, config)
+ return rewriteValue386_OpNeq32(v)
case OpNeq32F:
- return rewriteValue386_OpNeq32F(v, config)
+ return rewriteValue386_OpNeq32F(v)
case OpNeq64F:
- return rewriteValue386_OpNeq64F(v, config)
+ return rewriteValue386_OpNeq64F(v)
case OpNeq8:
- return rewriteValue386_OpNeq8(v, config)
+ return rewriteValue386_OpNeq8(v)
case OpNeqB:
- return rewriteValue386_OpNeqB(v, config)
+ return rewriteValue386_OpNeqB(v)
case OpNeqPtr:
- return rewriteValue386_OpNeqPtr(v, config)
+ return rewriteValue386_OpNeqPtr(v)
case OpNilCheck:
- return rewriteValue386_OpNilCheck(v, config)
+ return rewriteValue386_OpNilCheck(v)
case OpNot:
- return rewriteValue386_OpNot(v, config)
+ return rewriteValue386_OpNot(v)
case OpOffPtr:
- return rewriteValue386_OpOffPtr(v, config)
+ return rewriteValue386_OpOffPtr(v)
case OpOr16:
- return rewriteValue386_OpOr16(v, config)
+ return rewriteValue386_OpOr16(v)
case OpOr32:
- return rewriteValue386_OpOr32(v, config)
+ return rewriteValue386_OpOr32(v)
case OpOr8:
- return rewriteValue386_OpOr8(v, config)
+ return rewriteValue386_OpOr8(v)
case OpOrB:
- return rewriteValue386_OpOrB(v, config)
+ return rewriteValue386_OpOrB(v)
case OpRound32F:
- return rewriteValue386_OpRound32F(v, config)
+ return rewriteValue386_OpRound32F(v)
case OpRound64F:
- return rewriteValue386_OpRound64F(v, config)
+ return rewriteValue386_OpRound64F(v)
case OpRsh16Ux16:
- return rewriteValue386_OpRsh16Ux16(v, config)
+ return rewriteValue386_OpRsh16Ux16(v)
case OpRsh16Ux32:
- return rewriteValue386_OpRsh16Ux32(v, config)
+ return rewriteValue386_OpRsh16Ux32(v)
case OpRsh16Ux64:
- return rewriteValue386_OpRsh16Ux64(v, config)
+ return rewriteValue386_OpRsh16Ux64(v)
case OpRsh16Ux8:
- return rewriteValue386_OpRsh16Ux8(v, config)
+ return rewriteValue386_OpRsh16Ux8(v)
case OpRsh16x16:
- return rewriteValue386_OpRsh16x16(v, config)
+ return rewriteValue386_OpRsh16x16(v)
case OpRsh16x32:
- return rewriteValue386_OpRsh16x32(v, config)
+ return rewriteValue386_OpRsh16x32(v)
case OpRsh16x64:
- return rewriteValue386_OpRsh16x64(v, config)
+ return rewriteValue386_OpRsh16x64(v)
case OpRsh16x8:
- return rewriteValue386_OpRsh16x8(v, config)
+ return rewriteValue386_OpRsh16x8(v)
case OpRsh32Ux16:
- return rewriteValue386_OpRsh32Ux16(v, config)
+ return rewriteValue386_OpRsh32Ux16(v)
case OpRsh32Ux32:
- return rewriteValue386_OpRsh32Ux32(v, config)
+ return rewriteValue386_OpRsh32Ux32(v)
case OpRsh32Ux64:
- return rewriteValue386_OpRsh32Ux64(v, config)
+ return rewriteValue386_OpRsh32Ux64(v)
case OpRsh32Ux8:
- return rewriteValue386_OpRsh32Ux8(v, config)
+ return rewriteValue386_OpRsh32Ux8(v)
case OpRsh32x16:
- return rewriteValue386_OpRsh32x16(v, config)
+ return rewriteValue386_OpRsh32x16(v)
case OpRsh32x32:
- return rewriteValue386_OpRsh32x32(v, config)
+ return rewriteValue386_OpRsh32x32(v)
case OpRsh32x64:
- return rewriteValue386_OpRsh32x64(v, config)
+ return rewriteValue386_OpRsh32x64(v)
case OpRsh32x8:
- return rewriteValue386_OpRsh32x8(v, config)
+ return rewriteValue386_OpRsh32x8(v)
case OpRsh8Ux16:
- return rewriteValue386_OpRsh8Ux16(v, config)
+ return rewriteValue386_OpRsh8Ux16(v)
case OpRsh8Ux32:
- return rewriteValue386_OpRsh8Ux32(v, config)
+ return rewriteValue386_OpRsh8Ux32(v)
case OpRsh8Ux64:
- return rewriteValue386_OpRsh8Ux64(v, config)
+ return rewriteValue386_OpRsh8Ux64(v)
case OpRsh8Ux8:
- return rewriteValue386_OpRsh8Ux8(v, config)
+ return rewriteValue386_OpRsh8Ux8(v)
case OpRsh8x16:
- return rewriteValue386_OpRsh8x16(v, config)
+ return rewriteValue386_OpRsh8x16(v)
case OpRsh8x32:
- return rewriteValue386_OpRsh8x32(v, config)
+ return rewriteValue386_OpRsh8x32(v)
case OpRsh8x64:
- return rewriteValue386_OpRsh8x64(v, config)
+ return rewriteValue386_OpRsh8x64(v)
case OpRsh8x8:
- return rewriteValue386_OpRsh8x8(v, config)
+ return rewriteValue386_OpRsh8x8(v)
case OpSignExt16to32:
- return rewriteValue386_OpSignExt16to32(v, config)
+ return rewriteValue386_OpSignExt16to32(v)
case OpSignExt8to16:
- return rewriteValue386_OpSignExt8to16(v, config)
+ return rewriteValue386_OpSignExt8to16(v)
case OpSignExt8to32:
- return rewriteValue386_OpSignExt8to32(v, config)
+ return rewriteValue386_OpSignExt8to32(v)
case OpSignmask:
- return rewriteValue386_OpSignmask(v, config)
+ return rewriteValue386_OpSignmask(v)
case OpSlicemask:
- return rewriteValue386_OpSlicemask(v, config)
+ return rewriteValue386_OpSlicemask(v)
case OpSqrt:
- return rewriteValue386_OpSqrt(v, config)
+ return rewriteValue386_OpSqrt(v)
case OpStaticCall:
- return rewriteValue386_OpStaticCall(v, config)
+ return rewriteValue386_OpStaticCall(v)
case OpStore:
- return rewriteValue386_OpStore(v, config)
+ return rewriteValue386_OpStore(v)
case OpSub16:
- return rewriteValue386_OpSub16(v, config)
+ return rewriteValue386_OpSub16(v)
case OpSub32:
- return rewriteValue386_OpSub32(v, config)
+ return rewriteValue386_OpSub32(v)
case OpSub32F:
- return rewriteValue386_OpSub32F(v, config)
+ return rewriteValue386_OpSub32F(v)
case OpSub32carry:
- return rewriteValue386_OpSub32carry(v, config)
+ return rewriteValue386_OpSub32carry(v)
case OpSub32withcarry:
- return rewriteValue386_OpSub32withcarry(v, config)
+ return rewriteValue386_OpSub32withcarry(v)
case OpSub64F:
- return rewriteValue386_OpSub64F(v, config)
+ return rewriteValue386_OpSub64F(v)
case OpSub8:
- return rewriteValue386_OpSub8(v, config)
+ return rewriteValue386_OpSub8(v)
case OpSubPtr:
- return rewriteValue386_OpSubPtr(v, config)
+ return rewriteValue386_OpSubPtr(v)
case OpTrunc16to8:
- return rewriteValue386_OpTrunc16to8(v, config)
+ return rewriteValue386_OpTrunc16to8(v)
case OpTrunc32to16:
- return rewriteValue386_OpTrunc32to16(v, config)
+ return rewriteValue386_OpTrunc32to16(v)
case OpTrunc32to8:
- return rewriteValue386_OpTrunc32to8(v, config)
+ return rewriteValue386_OpTrunc32to8(v)
case OpXor16:
- return rewriteValue386_OpXor16(v, config)
+ return rewriteValue386_OpXor16(v)
case OpXor32:
- return rewriteValue386_OpXor32(v, config)
+ return rewriteValue386_OpXor32(v)
case OpXor8:
- return rewriteValue386_OpXor8(v, config)
+ return rewriteValue386_OpXor8(v)
case OpZero:
- return rewriteValue386_OpZero(v, config)
+ return rewriteValue386_OpZero(v)
case OpZeroExt16to32:
- return rewriteValue386_OpZeroExt16to32(v, config)
+ return rewriteValue386_OpZeroExt16to32(v)
case OpZeroExt8to16:
- return rewriteValue386_OpZeroExt8to16(v, config)
+ return rewriteValue386_OpZeroExt8to16(v)
case OpZeroExt8to32:
- return rewriteValue386_OpZeroExt8to32(v, config)
+ return rewriteValue386_OpZeroExt8to32(v)
case OpZeromask:
- return rewriteValue386_OpZeromask(v, config)
+ return rewriteValue386_OpZeromask(v)
}
return false
}
-func rewriteValue386_Op386ADCL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386ADCL(v *Value) bool {
// match: (ADCL x (MOVLconst [c]) f)
// cond:
// result: (ADCLconst [c] x f)
}
return false
}
-func rewriteValue386_Op386ADDL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386ADDL(v *Value) bool {
// match: (ADDL x (MOVLconst [c]))
// cond:
// result: (ADDLconst [c] x)
}
return false
}
-func rewriteValue386_Op386ADDLcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386ADDLcarry(v *Value) bool {
// match: (ADDLcarry x (MOVLconst [c]))
// cond:
// result: (ADDLconstcarry [c] x)
}
return false
}
-func rewriteValue386_Op386ADDLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386ADDLconst(v *Value) bool {
// match: (ADDLconst [c] (ADDL x y))
// cond:
// result: (LEAL1 [c] x y)
}
return false
}
-func rewriteValue386_Op386ANDL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386ANDL(v *Value) bool {
// match: (ANDL x (MOVLconst [c]))
// cond:
// result: (ANDLconst [c] x)
}
return false
}
-func rewriteValue386_Op386ANDLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386ANDLconst(v *Value) bool {
// match: (ANDLconst [c] (ANDLconst [d] x))
// cond:
// result: (ANDLconst [c & d] x)
}
return false
}
-func rewriteValue386_Op386CMPB(v *Value, config *Config) bool {
+func rewriteValue386_Op386CMPB(v *Value) bool {
b := v.Block
_ = b
// match: (CMPB x (MOVLconst [c]))
}
return false
}
-func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386CMPBconst(v *Value) bool {
// match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)==int8(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValue386_Op386CMPL(v *Value, config *Config) bool {
+func rewriteValue386_Op386CMPL(v *Value) bool {
b := v.Block
_ = b
// match: (CMPL x (MOVLconst [c]))
}
return false
}
-func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386CMPLconst(v *Value) bool {
// match: (CMPLconst (MOVLconst [x]) [y])
// cond: int32(x)==int32(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValue386_Op386CMPW(v *Value, config *Config) bool {
+func rewriteValue386_Op386CMPW(v *Value) bool {
b := v.Block
_ = b
// match: (CMPW x (MOVLconst [c]))
}
return false
}
-func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386CMPWconst(v *Value) bool {
// match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)==int16(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValue386_Op386LEAL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386LEAL(v *Value) bool {
// match: (LEAL [c] {s} (ADDLconst [d] x))
// cond: is32Bit(c+d)
// result: (LEAL [c+d] {s} x)
}
return false
}
-func rewriteValue386_Op386LEAL1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386LEAL1(v *Value) bool {
// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAL1 [c+d] {s} x y)
}
return false
}
-func rewriteValue386_Op386LEAL2(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386LEAL2(v *Value) bool {
// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAL2 [c+d] {s} x y)
}
return false
}
-func rewriteValue386_Op386LEAL4(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386LEAL4(v *Value) bool {
// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAL4 [c+d] {s} x y)
}
return false
}
-func rewriteValue386_Op386LEAL8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386LEAL8(v *Value) bool {
// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
// cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAL8 [c+d] {s} x y)
}
return false
}
-func rewriteValue386_Op386MOVBLSX(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBLSX(v *Value) bool {
b := v.Block
_ = b
// match: (MOVBLSX x:(MOVBload [off] {sym} ptr mem))
}
return false
}
-func rewriteValue386_Op386MOVBLSXload(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBLSXload(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
}
return false
}
-func rewriteValue386_Op386MOVBLZX(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBLZX(v *Value) bool {
b := v.Block
_ = b
// match: (MOVBLZX x:(MOVBload [off] {sym} ptr mem))
}
return false
}
-func rewriteValue386_Op386MOVBload(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBload(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x
}
return false
}
-func rewriteValue386_Op386MOVBloadidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVBloadidx1(v *Value) bool {
// match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
// cond:
// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVBstore(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBstore(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr x mem)
}
return false
}
-func rewriteValue386_Op386MOVBstoreconst(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
}
return false
}
-func rewriteValue386_Op386MOVBstoreconstidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVBstoreconstidx1(v *Value) bool {
// match: (MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
// cond:
// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVBstoreidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool {
// match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
// cond:
// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValue386_Op386MOVLload(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVLload(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x
}
return false
}
-func rewriteValue386_Op386MOVLloadidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVLloadidx1(v *Value) bool {
// match: (MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem)
// cond:
// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVLloadidx4(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVLloadidx4(v *Value) bool {
// match: (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
// cond:
// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVLstore(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVLstore(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVLstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValue386_Op386MOVLstoreconst(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVLstoreconst(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
}
return false
}
-func rewriteValue386_Op386MOVLstoreconstidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool {
// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem)
// cond:
// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVLstoreconstidx4(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVLstoreconstidx4(v *Value) bool {
// match: (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem)
// cond:
// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVLstoreidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool {
// match: (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem)
// cond:
// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
}
return false
}
-func rewriteValue386_Op386MOVLstoreidx4(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool {
// match: (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
// cond:
// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValue386_Op386MOVSDconst(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSDconst(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (MOVSDconst [c])
// cond: config.ctxt.Flag_shared
// result: (MOVSDconst2 (MOVSDconst1 [c]))
break
}
v.reset(Op386MOVSDconst2)
- v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, fe.TypeUInt32())
v0.AuxInt = c
v.AddArg(v0)
return true
}
return false
}
-func rewriteValue386_Op386MOVSDload(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSDload(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVSDload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValue386_Op386MOVSDloadidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool {
// match: (MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
// cond:
// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVSDloadidx8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool {
// match: (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem)
// cond:
// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVSDstore(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSDstore(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValue386_Op386MOVSDstoreidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool {
// match: (MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
// cond:
// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValue386_Op386MOVSDstoreidx8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool {
// match: (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem)
// cond:
// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValue386_Op386MOVSSconst(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSSconst(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (MOVSSconst [c])
// cond: config.ctxt.Flag_shared
// result: (MOVSSconst2 (MOVSSconst1 [c]))
break
}
v.reset(Op386MOVSSconst2)
- v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, fe.TypeUInt32())
v0.AuxInt = c
v.AddArg(v0)
return true
}
return false
}
-func rewriteValue386_Op386MOVSSload(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSSload(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVSSload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValue386_Op386MOVSSloadidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVSSloadidx1(v *Value) bool {
// match: (MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
// cond:
// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVSSloadidx4(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVSSloadidx4(v *Value) bool {
// match: (MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
// cond:
// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVSSstore(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSSstore(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValue386_Op386MOVSSstoreidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVSSstoreidx1(v *Value) bool {
// match: (MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
// cond:
// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValue386_Op386MOVSSstoreidx4(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVSSstoreidx4(v *Value) bool {
// match: (MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
// cond:
// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValue386_Op386MOVWLSX(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVWLSX(v *Value) bool {
b := v.Block
_ = b
// match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem))
}
return false
}
-func rewriteValue386_Op386MOVWLSXload(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVWLSXload(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
}
return false
}
-func rewriteValue386_Op386MOVWLZX(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVWLZX(v *Value) bool {
b := v.Block
_ = b
// match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem))
}
return false
}
-func rewriteValue386_Op386MOVWload(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVWload(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x
}
return false
}
-func rewriteValue386_Op386MOVWloadidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVWloadidx1(v *Value) bool {
// match: (MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem)
// cond:
// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVWloadidx2(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVWloadidx2(v *Value) bool {
// match: (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem)
// cond:
// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVWstore(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVWstore(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem)
// cond:
// result: (MOVWstore [off] {sym} ptr x mem)
}
return false
}
-func rewriteValue386_Op386MOVWstoreconst(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
}
return false
}
-func rewriteValue386_Op386MOVWstoreconstidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool {
// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem)
// cond:
// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
}
return false
}
-func rewriteValue386_Op386MOVWstoreconstidx2(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool {
b := v.Block
_ = b
// match: (MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem)
}
return false
}
-func rewriteValue386_Op386MOVWstoreidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool {
// match: (MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem)
// cond:
// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
}
return false
}
-func rewriteValue386_Op386MOVWstoreidx2(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool {
b := v.Block
_ = b
// match: (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem)
}
return false
}
-func rewriteValue386_Op386MULL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386MULL(v *Value) bool {
// match: (MULL x (MOVLconst [c]))
// cond:
// result: (MULLconst [c] x)
}
return false
}
-func rewriteValue386_Op386MULLconst(v *Value, config *Config) bool {
+func rewriteValue386_Op386MULLconst(v *Value) bool {
b := v.Block
_ = b
// match: (MULLconst [c] (MULLconst [d] x))
}
return false
}
-func rewriteValue386_Op386NEGL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386NEGL(v *Value) bool {
// match: (NEGL (MOVLconst [c]))
// cond:
// result: (MOVLconst [int64(int32(-c))])
}
return false
}
-func rewriteValue386_Op386NOTL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386NOTL(v *Value) bool {
// match: (NOTL (MOVLconst [c]))
// cond:
// result: (MOVLconst [^c])
}
return false
}
-func rewriteValue386_Op386ORL(v *Value, config *Config) bool {
+func rewriteValue386_Op386ORL(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (ORL x (MOVLconst [c]))
// cond:
// result: (ORLconst [c] x)
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, Op386MOVWload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, fe.TypeUInt16())
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i
break
}
b = mergePoint(b, x0, x1, x2)
- v0 := b.NewValue0(v.Pos, Op386MOVLload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i
}
return false
}
-func rewriteValue386_Op386ORLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386ORLconst(v *Value) bool {
// match: (ORLconst [c] x)
// cond: int32(c)==0
// result: x
}
return false
}
-func rewriteValue386_Op386ROLBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386ROLBconst(v *Value) bool {
// match: (ROLBconst [c] (ROLBconst [d] x))
// cond:
// result: (ROLBconst [(c+d)& 7] x)
}
return false
}
-func rewriteValue386_Op386ROLLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386ROLLconst(v *Value) bool {
// match: (ROLLconst [c] (ROLLconst [d] x))
// cond:
// result: (ROLLconst [(c+d)&31] x)
}
return false
}
-func rewriteValue386_Op386ROLWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386ROLWconst(v *Value) bool {
// match: (ROLWconst [c] (ROLWconst [d] x))
// cond:
// result: (ROLWconst [(c+d)&15] x)
}
return false
}
-func rewriteValue386_Op386SARB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SARB(v *Value) bool {
// match: (SARB x (MOVLconst [c]))
// cond:
// result: (SARBconst [min(c&31,7)] x)
}
return false
}
-func rewriteValue386_Op386SARBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SARBconst(v *Value) bool {
// match: (SARBconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValue386_Op386SARL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SARL(v *Value) bool {
// match: (SARL x (MOVLconst [c]))
// cond:
// result: (SARLconst [c&31] x)
}
return false
}
-func rewriteValue386_Op386SARLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SARLconst(v *Value) bool {
// match: (SARLconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValue386_Op386SARW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SARW(v *Value) bool {
// match: (SARW x (MOVLconst [c]))
// cond:
// result: (SARWconst [min(c&31,15)] x)
}
return false
}
-func rewriteValue386_Op386SARWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SARWconst(v *Value) bool {
// match: (SARWconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValue386_Op386SBBL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SBBL(v *Value) bool {
// match: (SBBL x (MOVLconst [c]) f)
// cond:
// result: (SBBLconst [c] x f)
}
return false
}
-func rewriteValue386_Op386SBBLcarrymask(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SBBLcarrymask(v *Value) bool {
// match: (SBBLcarrymask (FlagEQ))
// cond:
// result: (MOVLconst [0])
}
return false
}
-func rewriteValue386_Op386SETA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SETA(v *Value) bool {
// match: (SETA (InvertFlags x))
// cond:
// result: (SETB x)
}
return false
}
-func rewriteValue386_Op386SETAE(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SETAE(v *Value) bool {
// match: (SETAE (InvertFlags x))
// cond:
// result: (SETBE x)
}
return false
}
-func rewriteValue386_Op386SETB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SETB(v *Value) bool {
// match: (SETB (InvertFlags x))
// cond:
// result: (SETA x)
}
return false
}
-func rewriteValue386_Op386SETBE(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SETBE(v *Value) bool {
// match: (SETBE (InvertFlags x))
// cond:
// result: (SETAE x)
}
return false
}
-func rewriteValue386_Op386SETEQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SETEQ(v *Value) bool {
// match: (SETEQ (InvertFlags x))
// cond:
// result: (SETEQ x)
}
return false
}
-func rewriteValue386_Op386SETG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SETG(v *Value) bool {
// match: (SETG (InvertFlags x))
// cond:
// result: (SETL x)
}
return false
}
-func rewriteValue386_Op386SETGE(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SETGE(v *Value) bool {
// match: (SETGE (InvertFlags x))
// cond:
// result: (SETLE x)
}
return false
}
-func rewriteValue386_Op386SETL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SETL(v *Value) bool {
// match: (SETL (InvertFlags x))
// cond:
// result: (SETG x)
}
return false
}
-func rewriteValue386_Op386SETLE(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SETLE(v *Value) bool {
// match: (SETLE (InvertFlags x))
// cond:
// result: (SETGE x)
}
return false
}
-func rewriteValue386_Op386SETNE(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SETNE(v *Value) bool {
// match: (SETNE (InvertFlags x))
// cond:
// result: (SETNE x)
}
return false
}
-func rewriteValue386_Op386SHLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SHLL(v *Value) bool {
// match: (SHLL x (MOVLconst [c]))
// cond:
// result: (SHLLconst [c&31] x)
}
return false
}
-func rewriteValue386_Op386SHLLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SHLLconst(v *Value) bool {
// match: (SHLLconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValue386_Op386SHRB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SHRB(v *Value) bool {
// match: (SHRB x (MOVLconst [c]))
// cond: c&31 < 8
// result: (SHRBconst [c&31] x)
}
return false
}
-func rewriteValue386_Op386SHRBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SHRBconst(v *Value) bool {
// match: (SHRBconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValue386_Op386SHRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SHRL(v *Value) bool {
// match: (SHRL x (MOVLconst [c]))
// cond:
// result: (SHRLconst [c&31] x)
}
return false
}
-func rewriteValue386_Op386SHRLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SHRLconst(v *Value) bool {
// match: (SHRLconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValue386_Op386SHRW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SHRW(v *Value) bool {
// match: (SHRW x (MOVLconst [c]))
// cond: c&31 < 16
// result: (SHRWconst [c&31] x)
}
return false
}
-func rewriteValue386_Op386SHRWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SHRWconst(v *Value) bool {
// match: (SHRWconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValue386_Op386SUBL(v *Value, config *Config) bool {
+func rewriteValue386_Op386SUBL(v *Value) bool {
b := v.Block
_ = b
// match: (SUBL x (MOVLconst [c]))
}
return false
}
-func rewriteValue386_Op386SUBLcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SUBLcarry(v *Value) bool {
// match: (SUBLcarry x (MOVLconst [c]))
// cond:
// result: (SUBLconstcarry [c] x)
}
return false
}
-func rewriteValue386_Op386SUBLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386SUBLconst(v *Value) bool {
// match: (SUBLconst [c] x)
// cond: int32(c) == 0
// result: x
return true
}
}
-func rewriteValue386_Op386XORL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386XORL(v *Value) bool {
// match: (XORL x (MOVLconst [c]))
// cond:
// result: (XORLconst [c] x)
}
return false
}
-func rewriteValue386_Op386XORLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_Op386XORLconst(v *Value) bool {
// match: (XORLconst [c] (XORLconst [d] x))
// cond:
// result: (XORLconst [c ^ d] x)
}
return false
}
-func rewriteValue386_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAdd16(v *Value) bool {
// match: (Add16 x y)
// cond:
// result: (ADDL x y)
return true
}
}
-func rewriteValue386_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAdd32(v *Value) bool {
// match: (Add32 x y)
// cond:
// result: (ADDL x y)
return true
}
}
-func rewriteValue386_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAdd32F(v *Value) bool {
// match: (Add32F x y)
// cond:
// result: (ADDSS x y)
return true
}
}
-func rewriteValue386_OpAdd32carry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAdd32carry(v *Value) bool {
// match: (Add32carry x y)
// cond:
// result: (ADDLcarry x y)
return true
}
}
-func rewriteValue386_OpAdd32withcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAdd32withcarry(v *Value) bool {
// match: (Add32withcarry x y c)
// cond:
// result: (ADCL x y c)
return true
}
}
-func rewriteValue386_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAdd64F(v *Value) bool {
// match: (Add64F x y)
// cond:
// result: (ADDSD x y)
return true
}
}
-func rewriteValue386_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAdd8(v *Value) bool {
// match: (Add8 x y)
// cond:
// result: (ADDL x y)
return true
}
}
-func rewriteValue386_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAddPtr(v *Value) bool {
// match: (AddPtr x y)
// cond:
// result: (ADDL x y)
return true
}
}
-func rewriteValue386_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAddr(v *Value) bool {
// match: (Addr {sym} base)
// cond:
// result: (LEAL {sym} base)
return true
}
}
-func rewriteValue386_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAnd16(v *Value) bool {
// match: (And16 x y)
// cond:
// result: (ANDL x y)
return true
}
}
-func rewriteValue386_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAnd32(v *Value) bool {
// match: (And32 x y)
// cond:
// result: (ANDL x y)
return true
}
}
-func rewriteValue386_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAnd8(v *Value) bool {
// match: (And8 x y)
// cond:
// result: (ANDL x y)
return true
}
}
-func rewriteValue386_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAndB(v *Value) bool {
// match: (AndB x y)
// cond:
// result: (ANDL x y)
return true
}
}
-func rewriteValue386_OpAvg32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpAvg32u(v *Value) bool {
// match: (Avg32u x y)
// cond:
// result: (AVGLU x y)
return true
}
}
-func rewriteValue386_OpBswap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpBswap32(v *Value) bool {
// match: (Bswap32 x)
// cond:
// result: (BSWAPL x)
return true
}
}
-func rewriteValue386_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpClosureCall(v *Value) bool {
// match: (ClosureCall [argwid] entry closure mem)
// cond:
// result: (CALLclosure [argwid] entry closure mem)
return true
}
}
-func rewriteValue386_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpCom16(v *Value) bool {
// match: (Com16 x)
// cond:
// result: (NOTL x)
return true
}
}
-func rewriteValue386_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpCom32(v *Value) bool {
// match: (Com32 x)
// cond:
// result: (NOTL x)
return true
}
}
-func rewriteValue386_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpCom8(v *Value) bool {
// match: (Com8 x)
// cond:
// result: (NOTL x)
return true
}
}
-func rewriteValue386_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpConst16(v *Value) bool {
// match: (Const16 [val])
// cond:
// result: (MOVLconst [val])
return true
}
}
-func rewriteValue386_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpConst32(v *Value) bool {
// match: (Const32 [val])
// cond:
// result: (MOVLconst [val])
return true
}
}
-func rewriteValue386_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpConst32F(v *Value) bool {
// match: (Const32F [val])
// cond:
// result: (MOVSSconst [val])
return true
}
}
-func rewriteValue386_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpConst64F(v *Value) bool {
// match: (Const64F [val])
// cond:
// result: (MOVSDconst [val])
return true
}
}
-func rewriteValue386_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpConst8(v *Value) bool {
// match: (Const8 [val])
// cond:
// result: (MOVLconst [val])
return true
}
}
-func rewriteValue386_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpConstBool(v *Value) bool {
// match: (ConstBool [b])
// cond:
// result: (MOVLconst [b])
return true
}
}
-func rewriteValue386_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpConstNil(v *Value) bool {
// match: (ConstNil)
// cond:
// result: (MOVLconst [0])
return true
}
}
-func rewriteValue386_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpConvert(v *Value) bool {
// match: (Convert <t> x mem)
// cond:
// result: (MOVLconvert <t> x mem)
return true
}
}
-func rewriteValue386_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpCvt32Fto32(v *Value) bool {
// match: (Cvt32Fto32 x)
// cond:
// result: (CVTTSS2SL x)
return true
}
}
-func rewriteValue386_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpCvt32Fto64F(v *Value) bool {
// match: (Cvt32Fto64F x)
// cond:
// result: (CVTSS2SD x)
return true
}
}
-func rewriteValue386_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpCvt32to32F(v *Value) bool {
// match: (Cvt32to32F x)
// cond:
// result: (CVTSL2SS x)
return true
}
}
-func rewriteValue386_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpCvt32to64F(v *Value) bool {
// match: (Cvt32to64F x)
// cond:
// result: (CVTSL2SD x)
return true
}
}
-func rewriteValue386_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpCvt64Fto32(v *Value) bool {
// match: (Cvt64Fto32 x)
// cond:
// result: (CVTTSD2SL x)
return true
}
}
-func rewriteValue386_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpCvt64Fto32F(v *Value) bool {
// match: (Cvt64Fto32F x)
// cond:
// result: (CVTSD2SS x)
return true
}
}
-func rewriteValue386_OpDiv16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpDiv16(v *Value) bool {
// match: (Div16 x y)
// cond:
// result: (DIVW x y)
return true
}
}
-func rewriteValue386_OpDiv16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpDiv16u(v *Value) bool {
// match: (Div16u x y)
// cond:
// result: (DIVWU x y)
return true
}
}
-func rewriteValue386_OpDiv32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpDiv32(v *Value) bool {
// match: (Div32 x y)
// cond:
// result: (DIVL x y)
return true
}
}
-func rewriteValue386_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpDiv32F(v *Value) bool {
// match: (Div32F x y)
// cond:
// result: (DIVSS x y)
return true
}
}
-func rewriteValue386_OpDiv32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpDiv32u(v *Value) bool {
// match: (Div32u x y)
// cond:
// result: (DIVLU x y)
return true
}
}
-func rewriteValue386_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpDiv64F(v *Value) bool {
// match: (Div64F x y)
// cond:
// result: (DIVSD x y)
return true
}
}
-func rewriteValue386_OpDiv8(v *Value, config *Config) bool {
+func rewriteValue386_OpDiv8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8 x y)
// cond:
// result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
x := v.Args[0]
y := v.Args[1]
v.reset(Op386DIVW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValue386_OpDiv8u(v *Value, config *Config) bool {
+func rewriteValue386_OpDiv8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8u x y)
// cond:
// result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
x := v.Args[0]
y := v.Args[1]
v.reset(Op386DIVWU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValue386_OpEq16(v *Value, config *Config) bool {
+func rewriteValue386_OpEq16(v *Value) bool {
b := v.Block
_ = b
// match: (Eq16 x y)
return true
}
}
-func rewriteValue386_OpEq32(v *Value, config *Config) bool {
+func rewriteValue386_OpEq32(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32 x y)
return true
}
}
-func rewriteValue386_OpEq32F(v *Value, config *Config) bool {
+func rewriteValue386_OpEq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32F x y)
return true
}
}
-func rewriteValue386_OpEq64F(v *Value, config *Config) bool {
+func rewriteValue386_OpEq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq64F x y)
return true
}
}
-func rewriteValue386_OpEq8(v *Value, config *Config) bool {
+func rewriteValue386_OpEq8(v *Value) bool {
b := v.Block
_ = b
// match: (Eq8 x y)
return true
}
}
-func rewriteValue386_OpEqB(v *Value, config *Config) bool {
+func rewriteValue386_OpEqB(v *Value) bool {
b := v.Block
_ = b
// match: (EqB x y)
return true
}
}
-func rewriteValue386_OpEqPtr(v *Value, config *Config) bool {
+func rewriteValue386_OpEqPtr(v *Value) bool {
b := v.Block
_ = b
// match: (EqPtr x y)
return true
}
}
-func rewriteValue386_OpGeq16(v *Value, config *Config) bool {
+func rewriteValue386_OpGeq16(v *Value) bool {
b := v.Block
_ = b
// match: (Geq16 x y)
return true
}
}
-func rewriteValue386_OpGeq16U(v *Value, config *Config) bool {
+func rewriteValue386_OpGeq16U(v *Value) bool {
b := v.Block
_ = b
// match: (Geq16U x y)
return true
}
}
-func rewriteValue386_OpGeq32(v *Value, config *Config) bool {
+func rewriteValue386_OpGeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32 x y)
return true
}
}
-func rewriteValue386_OpGeq32F(v *Value, config *Config) bool {
+func rewriteValue386_OpGeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32F x y)
return true
}
}
-func rewriteValue386_OpGeq32U(v *Value, config *Config) bool {
+func rewriteValue386_OpGeq32U(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32U x y)
return true
}
}
-func rewriteValue386_OpGeq64F(v *Value, config *Config) bool {
+func rewriteValue386_OpGeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64F x y)
return true
}
}
-func rewriteValue386_OpGeq8(v *Value, config *Config) bool {
+func rewriteValue386_OpGeq8(v *Value) bool {
b := v.Block
_ = b
// match: (Geq8 x y)
return true
}
}
-func rewriteValue386_OpGeq8U(v *Value, config *Config) bool {
+func rewriteValue386_OpGeq8U(v *Value) bool {
b := v.Block
_ = b
// match: (Geq8U x y)
return true
}
}
-func rewriteValue386_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpGetClosurePtr(v *Value) bool {
// match: (GetClosurePtr)
// cond:
// result: (LoweredGetClosurePtr)
return true
}
}
-func rewriteValue386_OpGetG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpGetG(v *Value) bool {
// match: (GetG mem)
// cond:
// result: (LoweredGetG mem)
return true
}
}
-func rewriteValue386_OpGreater16(v *Value, config *Config) bool {
+func rewriteValue386_OpGreater16(v *Value) bool {
b := v.Block
_ = b
// match: (Greater16 x y)
return true
}
}
-func rewriteValue386_OpGreater16U(v *Value, config *Config) bool {
+func rewriteValue386_OpGreater16U(v *Value) bool {
b := v.Block
_ = b
// match: (Greater16U x y)
return true
}
}
-func rewriteValue386_OpGreater32(v *Value, config *Config) bool {
+func rewriteValue386_OpGreater32(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32 x y)
return true
}
}
-func rewriteValue386_OpGreater32F(v *Value, config *Config) bool {
+func rewriteValue386_OpGreater32F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32F x y)
return true
}
}
-func rewriteValue386_OpGreater32U(v *Value, config *Config) bool {
+func rewriteValue386_OpGreater32U(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32U x y)
return true
}
}
-func rewriteValue386_OpGreater64F(v *Value, config *Config) bool {
+func rewriteValue386_OpGreater64F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64F x y)
return true
}
}
-func rewriteValue386_OpGreater8(v *Value, config *Config) bool {
+func rewriteValue386_OpGreater8(v *Value) bool {
b := v.Block
_ = b
// match: (Greater8 x y)
return true
}
}
-func rewriteValue386_OpGreater8U(v *Value, config *Config) bool {
+func rewriteValue386_OpGreater8U(v *Value) bool {
b := v.Block
_ = b
// match: (Greater8U x y)
return true
}
}
-func rewriteValue386_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpHmul32(v *Value) bool {
// match: (Hmul32 x y)
// cond:
// result: (HMULL x y)
return true
}
}
-func rewriteValue386_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpHmul32u(v *Value) bool {
// match: (Hmul32u x y)
// cond:
// result: (HMULLU x y)
return true
}
}
-func rewriteValue386_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpInterCall(v *Value) bool {
// match: (InterCall [argwid] entry mem)
// cond:
// result: (CALLinter [argwid] entry mem)
return true
}
}
-func rewriteValue386_OpIsInBounds(v *Value, config *Config) bool {
+func rewriteValue386_OpIsInBounds(v *Value) bool {
b := v.Block
_ = b
// match: (IsInBounds idx len)
return true
}
}
-func rewriteValue386_OpIsNonNil(v *Value, config *Config) bool {
+func rewriteValue386_OpIsNonNil(v *Value) bool {
b := v.Block
_ = b
// match: (IsNonNil p)
return true
}
}
-func rewriteValue386_OpIsSliceInBounds(v *Value, config *Config) bool {
+func rewriteValue386_OpIsSliceInBounds(v *Value) bool {
b := v.Block
_ = b
// match: (IsSliceInBounds idx len)
return true
}
}
-func rewriteValue386_OpLeq16(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq16(v *Value) bool {
b := v.Block
_ = b
// match: (Leq16 x y)
return true
}
}
-func rewriteValue386_OpLeq16U(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq16U(v *Value) bool {
b := v.Block
_ = b
// match: (Leq16U x y)
return true
}
}
-func rewriteValue386_OpLeq32(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32 x y)
return true
}
}
-func rewriteValue386_OpLeq32F(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32F x y)
return true
}
}
-func rewriteValue386_OpLeq32U(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq32U(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32U x y)
return true
}
}
-func rewriteValue386_OpLeq64F(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64F x y)
return true
}
}
-func rewriteValue386_OpLeq8(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq8(v *Value) bool {
b := v.Block
_ = b
// match: (Leq8 x y)
return true
}
}
-func rewriteValue386_OpLeq8U(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq8U(v *Value) bool {
b := v.Block
_ = b
// match: (Leq8U x y)
return true
}
}
-func rewriteValue386_OpLess16(v *Value, config *Config) bool {
+func rewriteValue386_OpLess16(v *Value) bool {
b := v.Block
_ = b
// match: (Less16 x y)
return true
}
}
-func rewriteValue386_OpLess16U(v *Value, config *Config) bool {
+func rewriteValue386_OpLess16U(v *Value) bool {
b := v.Block
_ = b
// match: (Less16U x y)
return true
}
}
-func rewriteValue386_OpLess32(v *Value, config *Config) bool {
+func rewriteValue386_OpLess32(v *Value) bool {
b := v.Block
_ = b
// match: (Less32 x y)
return true
}
}
-func rewriteValue386_OpLess32F(v *Value, config *Config) bool {
+func rewriteValue386_OpLess32F(v *Value) bool {
b := v.Block
_ = b
// match: (Less32F x y)
return true
}
}
-func rewriteValue386_OpLess32U(v *Value, config *Config) bool {
+func rewriteValue386_OpLess32U(v *Value) bool {
b := v.Block
_ = b
// match: (Less32U x y)
return true
}
}
-func rewriteValue386_OpLess64F(v *Value, config *Config) bool {
+func rewriteValue386_OpLess64F(v *Value) bool {
b := v.Block
_ = b
// match: (Less64F x y)
return true
}
}
-func rewriteValue386_OpLess8(v *Value, config *Config) bool {
+func rewriteValue386_OpLess8(v *Value) bool {
b := v.Block
_ = b
// match: (Less8 x y)
return true
}
}
-func rewriteValue386_OpLess8U(v *Value, config *Config) bool {
+func rewriteValue386_OpLess8U(v *Value) bool {
b := v.Block
_ = b
// match: (Less8U x y)
return true
}
}
-func rewriteValue386_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpLoad(v *Value) bool {
// match: (Load <t> ptr mem)
// cond: (is32BitInt(t) || isPtr(t))
// result: (MOVLload ptr mem)
}
return false
}
-func rewriteValue386_OpLsh16x16(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh16x16(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x16 <t> x y)
return true
}
}
-func rewriteValue386_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh16x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x32 <t> x y)
return true
}
}
-func rewriteValue386_OpLsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpLsh16x64(v *Value) bool {
// match: (Lsh16x64 x (Const64 [c]))
// cond: uint64(c) < 16
// result: (SHLLconst x [c])
}
return false
}
-func rewriteValue386_OpLsh16x8(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh16x8(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x8 <t> x y)
return true
}
}
-func rewriteValue386_OpLsh32x16(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh32x16(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x16 <t> x y)
return true
}
}
-func rewriteValue386_OpLsh32x32(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh32x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x32 <t> x y)
return true
}
}
-func rewriteValue386_OpLsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpLsh32x64(v *Value) bool {
// match: (Lsh32x64 x (Const64 [c]))
// cond: uint64(c) < 32
// result: (SHLLconst x [c])
}
return false
}
-func rewriteValue386_OpLsh32x8(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh32x8(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x8 <t> x y)
return true
}
}
-func rewriteValue386_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh8x16(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x16 <t> x y)
return true
}
}
-func rewriteValue386_OpLsh8x32(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh8x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x32 <t> x y)
return true
}
}
-func rewriteValue386_OpLsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpLsh8x64(v *Value) bool {
// match: (Lsh8x64 x (Const64 [c]))
// cond: uint64(c) < 8
// result: (SHLLconst x [c])
}
return false
}
-func rewriteValue386_OpLsh8x8(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh8x8(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x8 <t> x y)
return true
}
}
-func rewriteValue386_OpMod16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpMod16(v *Value) bool {
// match: (Mod16 x y)
// cond:
// result: (MODW x y)
return true
}
}
-func rewriteValue386_OpMod16u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpMod16u(v *Value) bool {
// match: (Mod16u x y)
// cond:
// result: (MODWU x y)
return true
}
}
-func rewriteValue386_OpMod32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpMod32(v *Value) bool {
// match: (Mod32 x y)
// cond:
// result: (MODL x y)
return true
}
}
-func rewriteValue386_OpMod32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpMod32u(v *Value) bool {
// match: (Mod32u x y)
// cond:
// result: (MODLU x y)
return true
}
}
-func rewriteValue386_OpMod8(v *Value, config *Config) bool {
+func rewriteValue386_OpMod8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8 x y)
// cond:
// result: (MODW (SignExt8to16 x) (SignExt8to16 y))
x := v.Args[0]
y := v.Args[1]
v.reset(Op386MODW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValue386_OpMod8u(v *Value, config *Config) bool {
+func rewriteValue386_OpMod8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8u x y)
// cond:
// result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
x := v.Args[0]
y := v.Args[1]
v.reset(Op386MODWU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValue386_OpMove(v *Value, config *Config) bool {
+func rewriteValue386_OpMove(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Move [0] _ _ mem)
// cond:
// result: mem
mem := v.Args[2]
v.reset(Op386MOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVBload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, fe.TypeUInt8())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
mem := v.Args[2]
v.reset(Op386MOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVWload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, fe.TypeUInt16())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
mem := v.Args[2]
v.reset(Op386MOVLstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVLload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(Op386MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVBload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, fe.TypeUInt8())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVWstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, Op386MOVWload, config.fe.TypeUInt16())
+ v2 := b.NewValue0(v.Pos, Op386MOVWload, fe.TypeUInt16())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(Op386MOVBstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVBload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, fe.TypeUInt8())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, Op386MOVLload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(Op386MOVWstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVWload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, fe.TypeUInt16())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, Op386MOVLload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(Op386MOVLstore)
v.AuxInt = 3
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVLload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, Op386MOVLload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(Op386MOVLstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, Op386MOVLload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, Op386MOVLload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
v2.AddArg(dst)
- v3 := b.NewValue0(v.Pos, Op386MOVLload, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v3.AddArg(src)
v3.AddArg(mem)
v2.AddArg(v3)
v.reset(Op386REPMOVSL)
v.AddArg(dst)
v.AddArg(src)
- v0 := b.NewValue0(v.Pos, Op386MOVLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, fe.TypeUInt32())
v0.AuxInt = s / 4
v.AddArg(v0)
v.AddArg(mem)
}
return false
}
-func rewriteValue386_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpMul16(v *Value) bool {
// match: (Mul16 x y)
// cond:
// result: (MULL x y)
return true
}
}
-func rewriteValue386_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpMul32(v *Value) bool {
// match: (Mul32 x y)
// cond:
// result: (MULL x y)
return true
}
}
-func rewriteValue386_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpMul32F(v *Value) bool {
// match: (Mul32F x y)
// cond:
// result: (MULSS x y)
return true
}
}
-func rewriteValue386_OpMul32uhilo(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpMul32uhilo(v *Value) bool {
// match: (Mul32uhilo x y)
// cond:
// result: (MULLQU x y)
return true
}
}
-func rewriteValue386_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpMul64F(v *Value) bool {
// match: (Mul64F x y)
// cond:
// result: (MULSD x y)
return true
}
}
-func rewriteValue386_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpMul8(v *Value) bool {
// match: (Mul8 x y)
// cond:
// result: (MULL x y)
return true
}
}
-func rewriteValue386_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpNeg16(v *Value) bool {
// match: (Neg16 x)
// cond:
// result: (NEGL x)
return true
}
}
-func rewriteValue386_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpNeg32(v *Value) bool {
// match: (Neg32 x)
// cond:
// result: (NEGL x)
return true
}
}
-func rewriteValue386_OpNeg32F(v *Value, config *Config) bool {
+func rewriteValue386_OpNeg32F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neg32F x)
// cond: !config.use387
- // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
+ // result: (PXOR x (MOVSSconst <fe.TypeFloat32()> [f2i(math.Copysign(0, -1))]))
for {
x := v.Args[0]
if !(!config.use387) {
}
v.reset(Op386PXOR)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, Op386MOVSSconst, config.Frontend().TypeFloat32())
+ v0 := b.NewValue0(v.Pos, Op386MOVSSconst, fe.TypeFloat32())
v0.AuxInt = f2i(math.Copysign(0, -1))
v.AddArg(v0)
return true
}
return false
}
-func rewriteValue386_OpNeg64F(v *Value, config *Config) bool {
+func rewriteValue386_OpNeg64F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neg64F x)
// cond: !config.use387
- // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+ // result: (PXOR x (MOVSDconst <fe.TypeFloat64()> [f2i(math.Copysign(0, -1))]))
for {
x := v.Args[0]
if !(!config.use387) {
}
v.reset(Op386PXOR)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, Op386MOVSDconst, config.Frontend().TypeFloat64())
+ v0 := b.NewValue0(v.Pos, Op386MOVSDconst, fe.TypeFloat64())
v0.AuxInt = f2i(math.Copysign(0, -1))
v.AddArg(v0)
return true
}
return false
}
-func rewriteValue386_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpNeg8(v *Value) bool {
// match: (Neg8 x)
// cond:
// result: (NEGL x)
return true
}
}
-func rewriteValue386_OpNeq16(v *Value, config *Config) bool {
+func rewriteValue386_OpNeq16(v *Value) bool {
b := v.Block
_ = b
// match: (Neq16 x y)
return true
}
}
-func rewriteValue386_OpNeq32(v *Value, config *Config) bool {
+func rewriteValue386_OpNeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32 x y)
return true
}
}
-func rewriteValue386_OpNeq32F(v *Value, config *Config) bool {
+func rewriteValue386_OpNeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32F x y)
return true
}
}
-func rewriteValue386_OpNeq64F(v *Value, config *Config) bool {
+func rewriteValue386_OpNeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq64F x y)
return true
}
}
-func rewriteValue386_OpNeq8(v *Value, config *Config) bool {
+func rewriteValue386_OpNeq8(v *Value) bool {
b := v.Block
_ = b
// match: (Neq8 x y)
return true
}
}
-func rewriteValue386_OpNeqB(v *Value, config *Config) bool {
+func rewriteValue386_OpNeqB(v *Value) bool {
b := v.Block
_ = b
// match: (NeqB x y)
return true
}
}
-func rewriteValue386_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValue386_OpNeqPtr(v *Value) bool {
b := v.Block
_ = b
// match: (NeqPtr x y)
return true
}
}
-func rewriteValue386_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpNilCheck(v *Value) bool {
// match: (NilCheck ptr mem)
// cond:
// result: (LoweredNilCheck ptr mem)
return true
}
}
-func rewriteValue386_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpNot(v *Value) bool {
// match: (Not x)
// cond:
// result: (XORLconst [1] x)
return true
}
}
-func rewriteValue386_OpOffPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpOffPtr(v *Value) bool {
// match: (OffPtr [off] ptr)
// cond:
// result: (ADDLconst [off] ptr)
return true
}
}
-func rewriteValue386_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpOr16(v *Value) bool {
// match: (Or16 x y)
// cond:
// result: (ORL x y)
return true
}
}
-func rewriteValue386_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpOr32(v *Value) bool {
// match: (Or32 x y)
// cond:
// result: (ORL x y)
return true
}
}
-func rewriteValue386_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpOr8(v *Value) bool {
// match: (Or8 x y)
// cond:
// result: (ORL x y)
return true
}
}
-func rewriteValue386_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpOrB(v *Value) bool {
// match: (OrB x y)
// cond:
// result: (ORL x y)
return true
}
}
-func rewriteValue386_OpRound32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpRound32F(v *Value) bool {
// match: (Round32F x)
// cond:
// result: x
return true
}
}
-func rewriteValue386_OpRound64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpRound64F(v *Value) bool {
// match: (Round64F x)
// cond:
// result: x
return true
}
}
-func rewriteValue386_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh16Ux16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux16 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh16Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux32 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh16Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpRsh16Ux64(v *Value) bool {
// match: (Rsh16Ux64 x (Const64 [c]))
// cond: uint64(c) < 16
// result: (SHRWconst x [c])
}
return false
}
-func rewriteValue386_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh16Ux8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux8 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh16x16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x16 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh16x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x32 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpRsh16x64(v *Value) bool {
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint64(c) < 16
// result: (SARWconst x [c])
}
return false
}
-func rewriteValue386_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh16x8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x8 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh32Ux16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux16 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh32Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux32 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh32Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpRsh32Ux64(v *Value) bool {
// match: (Rsh32Ux64 x (Const64 [c]))
// cond: uint64(c) < 32
// result: (SHRLconst x [c])
}
return false
}
-func rewriteValue386_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh32Ux8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux8 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh32x16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x16 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh32x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x32 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpRsh32x64(v *Value) bool {
// match: (Rsh32x64 x (Const64 [c]))
// cond: uint64(c) < 32
// result: (SARLconst x [c])
}
return false
}
-func rewriteValue386_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh32x8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x8 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh8Ux16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux16 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh8Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux32 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh8Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpRsh8Ux64(v *Value) bool {
// match: (Rsh8Ux64 x (Const64 [c]))
// cond: uint64(c) < 8
// result: (SHRBconst x [c])
}
return false
}
-func rewriteValue386_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh8Ux8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux8 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh8x16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x16 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh8x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x32 <t> x y)
return true
}
}
-func rewriteValue386_OpRsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpRsh8x64(v *Value) bool {
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint64(c) < 8
// result: (SARBconst x [c])
}
return false
}
-func rewriteValue386_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh8x8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x8 <t> x y)
return true
}
}
-func rewriteValue386_OpSignExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSignExt16to32(v *Value) bool {
// match: (SignExt16to32 x)
// cond:
// result: (MOVWLSX x)
return true
}
}
-func rewriteValue386_OpSignExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSignExt8to16(v *Value) bool {
// match: (SignExt8to16 x)
// cond:
// result: (MOVBLSX x)
return true
}
}
-func rewriteValue386_OpSignExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSignExt8to32(v *Value) bool {
// match: (SignExt8to32 x)
// cond:
// result: (MOVBLSX x)
return true
}
}
-func rewriteValue386_OpSignmask(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSignmask(v *Value) bool {
// match: (Signmask x)
// cond:
// result: (SARLconst x [31])
return true
}
}
-func rewriteValue386_OpSlicemask(v *Value, config *Config) bool {
+func rewriteValue386_OpSlicemask(v *Value) bool {
b := v.Block
_ = b
// match: (Slicemask <t> x)
return true
}
}
-func rewriteValue386_OpSqrt(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSqrt(v *Value) bool {
// match: (Sqrt x)
// cond:
// result: (SQRTSD x)
return true
}
}
-func rewriteValue386_OpStaticCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpStaticCall(v *Value) bool {
// match: (StaticCall [argwid] {target} mem)
// cond:
// result: (CALLstatic [argwid] {target} mem)
return true
}
}
-func rewriteValue386_OpStore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpStore(v *Value) bool {
// match: (Store {t} ptr val mem)
// cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
// result: (MOVSDstore ptr val mem)
}
return false
}
-func rewriteValue386_OpSub16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSub16(v *Value) bool {
// match: (Sub16 x y)
// cond:
// result: (SUBL x y)
return true
}
}
-func rewriteValue386_OpSub32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSub32(v *Value) bool {
// match: (Sub32 x y)
// cond:
// result: (SUBL x y)
return true
}
}
-func rewriteValue386_OpSub32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSub32F(v *Value) bool {
// match: (Sub32F x y)
// cond:
// result: (SUBSS x y)
return true
}
}
-func rewriteValue386_OpSub32carry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSub32carry(v *Value) bool {
// match: (Sub32carry x y)
// cond:
// result: (SUBLcarry x y)
return true
}
}
-func rewriteValue386_OpSub32withcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSub32withcarry(v *Value) bool {
// match: (Sub32withcarry x y c)
// cond:
// result: (SBBL x y c)
return true
}
}
-func rewriteValue386_OpSub64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSub64F(v *Value) bool {
// match: (Sub64F x y)
// cond:
// result: (SUBSD x y)
return true
}
}
-func rewriteValue386_OpSub8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSub8(v *Value) bool {
// match: (Sub8 x y)
// cond:
// result: (SUBL x y)
return true
}
}
-func rewriteValue386_OpSubPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpSubPtr(v *Value) bool {
// match: (SubPtr x y)
// cond:
// result: (SUBL x y)
return true
}
}
-func rewriteValue386_OpTrunc16to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpTrunc16to8(v *Value) bool {
// match: (Trunc16to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValue386_OpTrunc32to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpTrunc32to16(v *Value) bool {
// match: (Trunc32to16 x)
// cond:
// result: x
return true
}
}
-func rewriteValue386_OpTrunc32to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpTrunc32to8(v *Value) bool {
// match: (Trunc32to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValue386_OpXor16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpXor16(v *Value) bool {
// match: (Xor16 x y)
// cond:
// result: (XORL x y)
return true
}
}
-func rewriteValue386_OpXor32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpXor32(v *Value) bool {
// match: (Xor32 x y)
// cond:
// result: (XORL x y)
return true
}
}
-func rewriteValue386_OpXor8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpXor8(v *Value) bool {
// match: (Xor8 x y)
// cond:
// result: (XORL x y)
return true
}
}
-func rewriteValue386_OpZero(v *Value, config *Config) bool {
+func rewriteValue386_OpZero(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Zero [0] _ mem)
// cond:
// result: mem
}
v.reset(OpZero)
v.AuxInt = s - s%4
- v0 := b.NewValue0(v.Pos, Op386ADDLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, Op386ADDLconst, fe.TypeUInt32())
v0.AuxInt = s % 4
v0.AddArg(destptr)
v.AddArg(v0)
v.reset(Op386DUFFZERO)
v.AuxInt = 1 * (128 - s/4)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, Op386MOVLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
}
v.reset(Op386REPSTOSL)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, Op386MOVLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, fe.TypeUInt32())
v0.AuxInt = s / 4
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, Op386MOVLconst, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, Op386MOVLconst, fe.TypeUInt32())
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(mem)
}
return false
}
-func rewriteValue386_OpZeroExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpZeroExt16to32(v *Value) bool {
// match: (ZeroExt16to32 x)
// cond:
// result: (MOVWLZX x)
return true
}
}
-func rewriteValue386_OpZeroExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpZeroExt8to16(v *Value) bool {
// match: (ZeroExt8to16 x)
// cond:
// result: (MOVBLZX x)
return true
}
}
-func rewriteValue386_OpZeroExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValue386_OpZeroExt8to32(v *Value) bool {
// match: (ZeroExt8to32 x)
// cond:
// result: (MOVBLZX x)
return true
}
}
-func rewriteValue386_OpZeromask(v *Value, config *Config) bool {
+func rewriteValue386_OpZeromask(v *Value) bool {
b := v.Block
_ = b
// match: (Zeromask <t> x)
return true
}
}
-func rewriteBlock386(b *Block, config *Config) bool {
+func rewriteBlock386(b *Block) bool {
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
switch b.Kind {
case Block386EQ:
// match: (EQ (InvertFlags cmp) yes no)
import "math"
var _ = math.MinInt8 // in case not otherwise used
-func rewriteValueAMD64(v *Value, config *Config) bool {
+func rewriteValueAMD64(v *Value) bool {
switch v.Op {
case OpAMD64ADDL:
- return rewriteValueAMD64_OpAMD64ADDL(v, config)
+ return rewriteValueAMD64_OpAMD64ADDL(v)
case OpAMD64ADDLconst:
- return rewriteValueAMD64_OpAMD64ADDLconst(v, config)
+ return rewriteValueAMD64_OpAMD64ADDLconst(v)
case OpAMD64ADDQ:
- return rewriteValueAMD64_OpAMD64ADDQ(v, config)
+ return rewriteValueAMD64_OpAMD64ADDQ(v)
case OpAMD64ADDQconst:
- return rewriteValueAMD64_OpAMD64ADDQconst(v, config)
+ return rewriteValueAMD64_OpAMD64ADDQconst(v)
case OpAMD64ADDSD:
- return rewriteValueAMD64_OpAMD64ADDSD(v, config)
+ return rewriteValueAMD64_OpAMD64ADDSD(v)
case OpAMD64ADDSS:
- return rewriteValueAMD64_OpAMD64ADDSS(v, config)
+ return rewriteValueAMD64_OpAMD64ADDSS(v)
case OpAMD64ANDL:
- return rewriteValueAMD64_OpAMD64ANDL(v, config)
+ return rewriteValueAMD64_OpAMD64ANDL(v)
case OpAMD64ANDLconst:
- return rewriteValueAMD64_OpAMD64ANDLconst(v, config)
+ return rewriteValueAMD64_OpAMD64ANDLconst(v)
case OpAMD64ANDQ:
- return rewriteValueAMD64_OpAMD64ANDQ(v, config)
+ return rewriteValueAMD64_OpAMD64ANDQ(v)
case OpAMD64ANDQconst:
- return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
+ return rewriteValueAMD64_OpAMD64ANDQconst(v)
case OpAMD64BSFQ:
- return rewriteValueAMD64_OpAMD64BSFQ(v, config)
+ return rewriteValueAMD64_OpAMD64BSFQ(v)
case OpAMD64BTQconst:
- return rewriteValueAMD64_OpAMD64BTQconst(v, config)
+ return rewriteValueAMD64_OpAMD64BTQconst(v)
case OpAMD64CMOVQEQ:
- return rewriteValueAMD64_OpAMD64CMOVQEQ(v, config)
+ return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
case OpAMD64CMPB:
- return rewriteValueAMD64_OpAMD64CMPB(v, config)
+ return rewriteValueAMD64_OpAMD64CMPB(v)
case OpAMD64CMPBconst:
- return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
+ return rewriteValueAMD64_OpAMD64CMPBconst(v)
case OpAMD64CMPL:
- return rewriteValueAMD64_OpAMD64CMPL(v, config)
+ return rewriteValueAMD64_OpAMD64CMPL(v)
case OpAMD64CMPLconst:
- return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
+ return rewriteValueAMD64_OpAMD64CMPLconst(v)
case OpAMD64CMPQ:
- return rewriteValueAMD64_OpAMD64CMPQ(v, config)
+ return rewriteValueAMD64_OpAMD64CMPQ(v)
case OpAMD64CMPQconst:
- return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
+ return rewriteValueAMD64_OpAMD64CMPQconst(v)
case OpAMD64CMPW:
- return rewriteValueAMD64_OpAMD64CMPW(v, config)
+ return rewriteValueAMD64_OpAMD64CMPW(v)
case OpAMD64CMPWconst:
- return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
+ return rewriteValueAMD64_OpAMD64CMPWconst(v)
case OpAMD64CMPXCHGLlock:
- return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v, config)
+ return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
case OpAMD64CMPXCHGQlock:
- return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v, config)
+ return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
case OpAMD64LEAL:
- return rewriteValueAMD64_OpAMD64LEAL(v, config)
+ return rewriteValueAMD64_OpAMD64LEAL(v)
case OpAMD64LEAQ:
- return rewriteValueAMD64_OpAMD64LEAQ(v, config)
+ return rewriteValueAMD64_OpAMD64LEAQ(v)
case OpAMD64LEAQ1:
- return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
+ return rewriteValueAMD64_OpAMD64LEAQ1(v)
case OpAMD64LEAQ2:
- return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
+ return rewriteValueAMD64_OpAMD64LEAQ2(v)
case OpAMD64LEAQ4:
- return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
+ return rewriteValueAMD64_OpAMD64LEAQ4(v)
case OpAMD64LEAQ8:
- return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
+ return rewriteValueAMD64_OpAMD64LEAQ8(v)
case OpAMD64MOVBQSX:
- return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
+ return rewriteValueAMD64_OpAMD64MOVBQSX(v)
case OpAMD64MOVBQSXload:
- return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
+ return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
case OpAMD64MOVBQZX:
- return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
+ return rewriteValueAMD64_OpAMD64MOVBQZX(v)
case OpAMD64MOVBload:
- return rewriteValueAMD64_OpAMD64MOVBload(v, config)
+ return rewriteValueAMD64_OpAMD64MOVBload(v)
case OpAMD64MOVBloadidx1:
- return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVBloadidx1(v)
case OpAMD64MOVBstore:
- return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
+ return rewriteValueAMD64_OpAMD64MOVBstore(v)
case OpAMD64MOVBstoreconst:
- return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
+ return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
case OpAMD64MOVBstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v)
case OpAMD64MOVBstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v)
case OpAMD64MOVLQSX:
- return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLQSX(v)
case OpAMD64MOVLQSXload:
- return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
case OpAMD64MOVLQZX:
- return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLQZX(v)
case OpAMD64MOVLatomicload:
- return rewriteValueAMD64_OpAMD64MOVLatomicload(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
case OpAMD64MOVLload:
- return rewriteValueAMD64_OpAMD64MOVLload(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLload(v)
case OpAMD64MOVLloadidx1:
- return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLloadidx1(v)
case OpAMD64MOVLloadidx4:
- return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLloadidx4(v)
case OpAMD64MOVLstore:
- return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLstore(v)
case OpAMD64MOVLstoreconst:
- return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
case OpAMD64MOVLstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v)
case OpAMD64MOVLstoreconstidx4:
- return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v)
case OpAMD64MOVLstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v)
case OpAMD64MOVLstoreidx4:
- return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
+ return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v)
case OpAMD64MOVOload:
- return rewriteValueAMD64_OpAMD64MOVOload(v, config)
+ return rewriteValueAMD64_OpAMD64MOVOload(v)
case OpAMD64MOVOstore:
- return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
+ return rewriteValueAMD64_OpAMD64MOVOstore(v)
case OpAMD64MOVQatomicload:
- return rewriteValueAMD64_OpAMD64MOVQatomicload(v, config)
+ return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
case OpAMD64MOVQload:
- return rewriteValueAMD64_OpAMD64MOVQload(v, config)
+ return rewriteValueAMD64_OpAMD64MOVQload(v)
case OpAMD64MOVQloadidx1:
- return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVQloadidx1(v)
case OpAMD64MOVQloadidx8:
- return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
+ return rewriteValueAMD64_OpAMD64MOVQloadidx8(v)
case OpAMD64MOVQstore:
- return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
+ return rewriteValueAMD64_OpAMD64MOVQstore(v)
case OpAMD64MOVQstoreconst:
- return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
+ return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
case OpAMD64MOVQstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v)
case OpAMD64MOVQstoreconstidx8:
- return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
+ return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v)
case OpAMD64MOVQstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v)
case OpAMD64MOVQstoreidx8:
- return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
+ return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v)
case OpAMD64MOVSDload:
- return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
+ return rewriteValueAMD64_OpAMD64MOVSDload(v)
case OpAMD64MOVSDloadidx1:
- return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v)
case OpAMD64MOVSDloadidx8:
- return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
+ return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v)
case OpAMD64MOVSDstore:
- return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
+ return rewriteValueAMD64_OpAMD64MOVSDstore(v)
case OpAMD64MOVSDstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v)
case OpAMD64MOVSDstoreidx8:
- return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
+ return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v)
case OpAMD64MOVSSload:
- return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
+ return rewriteValueAMD64_OpAMD64MOVSSload(v)
case OpAMD64MOVSSloadidx1:
- return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v)
case OpAMD64MOVSSloadidx4:
- return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
+ return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v)
case OpAMD64MOVSSstore:
- return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
+ return rewriteValueAMD64_OpAMD64MOVSSstore(v)
case OpAMD64MOVSSstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v)
case OpAMD64MOVSSstoreidx4:
- return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
+ return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v)
case OpAMD64MOVWQSX:
- return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
+ return rewriteValueAMD64_OpAMD64MOVWQSX(v)
case OpAMD64MOVWQSXload:
- return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config)
+ return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
case OpAMD64MOVWQZX:
- return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
+ return rewriteValueAMD64_OpAMD64MOVWQZX(v)
case OpAMD64MOVWload:
- return rewriteValueAMD64_OpAMD64MOVWload(v, config)
+ return rewriteValueAMD64_OpAMD64MOVWload(v)
case OpAMD64MOVWloadidx1:
- return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVWloadidx1(v)
case OpAMD64MOVWloadidx2:
- return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
+ return rewriteValueAMD64_OpAMD64MOVWloadidx2(v)
case OpAMD64MOVWstore:
- return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
+ return rewriteValueAMD64_OpAMD64MOVWstore(v)
case OpAMD64MOVWstoreconst:
- return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
+ return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
case OpAMD64MOVWstoreconstidx1:
- return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v)
case OpAMD64MOVWstoreconstidx2:
- return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
+ return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v)
case OpAMD64MOVWstoreidx1:
- return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
+ return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v)
case OpAMD64MOVWstoreidx2:
- return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
+ return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v)
case OpAMD64MULL:
- return rewriteValueAMD64_OpAMD64MULL(v, config)
+ return rewriteValueAMD64_OpAMD64MULL(v)
case OpAMD64MULLconst:
- return rewriteValueAMD64_OpAMD64MULLconst(v, config)
+ return rewriteValueAMD64_OpAMD64MULLconst(v)
case OpAMD64MULQ:
- return rewriteValueAMD64_OpAMD64MULQ(v, config)
+ return rewriteValueAMD64_OpAMD64MULQ(v)
case OpAMD64MULQconst:
- return rewriteValueAMD64_OpAMD64MULQconst(v, config)
+ return rewriteValueAMD64_OpAMD64MULQconst(v)
case OpAMD64MULSD:
- return rewriteValueAMD64_OpAMD64MULSD(v, config)
+ return rewriteValueAMD64_OpAMD64MULSD(v)
case OpAMD64MULSS:
- return rewriteValueAMD64_OpAMD64MULSS(v, config)
+ return rewriteValueAMD64_OpAMD64MULSS(v)
case OpAMD64NEGL:
- return rewriteValueAMD64_OpAMD64NEGL(v, config)
+ return rewriteValueAMD64_OpAMD64NEGL(v)
case OpAMD64NEGQ:
- return rewriteValueAMD64_OpAMD64NEGQ(v, config)
+ return rewriteValueAMD64_OpAMD64NEGQ(v)
case OpAMD64NOTL:
- return rewriteValueAMD64_OpAMD64NOTL(v, config)
+ return rewriteValueAMD64_OpAMD64NOTL(v)
case OpAMD64NOTQ:
- return rewriteValueAMD64_OpAMD64NOTQ(v, config)
+ return rewriteValueAMD64_OpAMD64NOTQ(v)
case OpAMD64ORL:
- return rewriteValueAMD64_OpAMD64ORL(v, config)
+ return rewriteValueAMD64_OpAMD64ORL(v)
case OpAMD64ORLconst:
- return rewriteValueAMD64_OpAMD64ORLconst(v, config)
+ return rewriteValueAMD64_OpAMD64ORLconst(v)
case OpAMD64ORQ:
- return rewriteValueAMD64_OpAMD64ORQ(v, config)
+ return rewriteValueAMD64_OpAMD64ORQ(v)
case OpAMD64ORQconst:
- return rewriteValueAMD64_OpAMD64ORQconst(v, config)
+ return rewriteValueAMD64_OpAMD64ORQconst(v)
case OpAMD64ROLBconst:
- return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
+ return rewriteValueAMD64_OpAMD64ROLBconst(v)
case OpAMD64ROLLconst:
- return rewriteValueAMD64_OpAMD64ROLLconst(v, config)
+ return rewriteValueAMD64_OpAMD64ROLLconst(v)
case OpAMD64ROLQconst:
- return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
+ return rewriteValueAMD64_OpAMD64ROLQconst(v)
case OpAMD64ROLWconst:
- return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
+ return rewriteValueAMD64_OpAMD64ROLWconst(v)
case OpAMD64SARB:
- return rewriteValueAMD64_OpAMD64SARB(v, config)
+ return rewriteValueAMD64_OpAMD64SARB(v)
case OpAMD64SARBconst:
- return rewriteValueAMD64_OpAMD64SARBconst(v, config)
+ return rewriteValueAMD64_OpAMD64SARBconst(v)
case OpAMD64SARL:
- return rewriteValueAMD64_OpAMD64SARL(v, config)
+ return rewriteValueAMD64_OpAMD64SARL(v)
case OpAMD64SARLconst:
- return rewriteValueAMD64_OpAMD64SARLconst(v, config)
+ return rewriteValueAMD64_OpAMD64SARLconst(v)
case OpAMD64SARQ:
- return rewriteValueAMD64_OpAMD64SARQ(v, config)
+ return rewriteValueAMD64_OpAMD64SARQ(v)
case OpAMD64SARQconst:
- return rewriteValueAMD64_OpAMD64SARQconst(v, config)
+ return rewriteValueAMD64_OpAMD64SARQconst(v)
case OpAMD64SARW:
- return rewriteValueAMD64_OpAMD64SARW(v, config)
+ return rewriteValueAMD64_OpAMD64SARW(v)
case OpAMD64SARWconst:
- return rewriteValueAMD64_OpAMD64SARWconst(v, config)
+ return rewriteValueAMD64_OpAMD64SARWconst(v)
case OpAMD64SBBLcarrymask:
- return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
+ return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
case OpAMD64SBBQcarrymask:
- return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
+ return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
case OpAMD64SETA:
- return rewriteValueAMD64_OpAMD64SETA(v, config)
+ return rewriteValueAMD64_OpAMD64SETA(v)
case OpAMD64SETAE:
- return rewriteValueAMD64_OpAMD64SETAE(v, config)
+ return rewriteValueAMD64_OpAMD64SETAE(v)
case OpAMD64SETB:
- return rewriteValueAMD64_OpAMD64SETB(v, config)
+ return rewriteValueAMD64_OpAMD64SETB(v)
case OpAMD64SETBE:
- return rewriteValueAMD64_OpAMD64SETBE(v, config)
+ return rewriteValueAMD64_OpAMD64SETBE(v)
case OpAMD64SETEQ:
- return rewriteValueAMD64_OpAMD64SETEQ(v, config)
+ return rewriteValueAMD64_OpAMD64SETEQ(v)
case OpAMD64SETG:
- return rewriteValueAMD64_OpAMD64SETG(v, config)
+ return rewriteValueAMD64_OpAMD64SETG(v)
case OpAMD64SETGE:
- return rewriteValueAMD64_OpAMD64SETGE(v, config)
+ return rewriteValueAMD64_OpAMD64SETGE(v)
case OpAMD64SETL:
- return rewriteValueAMD64_OpAMD64SETL(v, config)
+ return rewriteValueAMD64_OpAMD64SETL(v)
case OpAMD64SETLE:
- return rewriteValueAMD64_OpAMD64SETLE(v, config)
+ return rewriteValueAMD64_OpAMD64SETLE(v)
case OpAMD64SETNE:
- return rewriteValueAMD64_OpAMD64SETNE(v, config)
+ return rewriteValueAMD64_OpAMD64SETNE(v)
case OpAMD64SHLL:
- return rewriteValueAMD64_OpAMD64SHLL(v, config)
+ return rewriteValueAMD64_OpAMD64SHLL(v)
case OpAMD64SHLLconst:
- return rewriteValueAMD64_OpAMD64SHLLconst(v, config)
+ return rewriteValueAMD64_OpAMD64SHLLconst(v)
case OpAMD64SHLQ:
- return rewriteValueAMD64_OpAMD64SHLQ(v, config)
+ return rewriteValueAMD64_OpAMD64SHLQ(v)
case OpAMD64SHLQconst:
- return rewriteValueAMD64_OpAMD64SHLQconst(v, config)
+ return rewriteValueAMD64_OpAMD64SHLQconst(v)
case OpAMD64SHRB:
- return rewriteValueAMD64_OpAMD64SHRB(v, config)
+ return rewriteValueAMD64_OpAMD64SHRB(v)
case OpAMD64SHRBconst:
- return rewriteValueAMD64_OpAMD64SHRBconst(v, config)
+ return rewriteValueAMD64_OpAMD64SHRBconst(v)
case OpAMD64SHRL:
- return rewriteValueAMD64_OpAMD64SHRL(v, config)
+ return rewriteValueAMD64_OpAMD64SHRL(v)
case OpAMD64SHRLconst:
- return rewriteValueAMD64_OpAMD64SHRLconst(v, config)
+ return rewriteValueAMD64_OpAMD64SHRLconst(v)
case OpAMD64SHRQ:
- return rewriteValueAMD64_OpAMD64SHRQ(v, config)
+ return rewriteValueAMD64_OpAMD64SHRQ(v)
case OpAMD64SHRQconst:
- return rewriteValueAMD64_OpAMD64SHRQconst(v, config)
+ return rewriteValueAMD64_OpAMD64SHRQconst(v)
case OpAMD64SHRW:
- return rewriteValueAMD64_OpAMD64SHRW(v, config)
+ return rewriteValueAMD64_OpAMD64SHRW(v)
case OpAMD64SHRWconst:
- return rewriteValueAMD64_OpAMD64SHRWconst(v, config)
+ return rewriteValueAMD64_OpAMD64SHRWconst(v)
case OpAMD64SUBL:
- return rewriteValueAMD64_OpAMD64SUBL(v, config)
+ return rewriteValueAMD64_OpAMD64SUBL(v)
case OpAMD64SUBLconst:
- return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
+ return rewriteValueAMD64_OpAMD64SUBLconst(v)
case OpAMD64SUBQ:
- return rewriteValueAMD64_OpAMD64SUBQ(v, config)
+ return rewriteValueAMD64_OpAMD64SUBQ(v)
case OpAMD64SUBQconst:
- return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
+ return rewriteValueAMD64_OpAMD64SUBQconst(v)
case OpAMD64SUBSD:
- return rewriteValueAMD64_OpAMD64SUBSD(v, config)
+ return rewriteValueAMD64_OpAMD64SUBSD(v)
case OpAMD64SUBSS:
- return rewriteValueAMD64_OpAMD64SUBSS(v, config)
+ return rewriteValueAMD64_OpAMD64SUBSS(v)
case OpAMD64TESTB:
- return rewriteValueAMD64_OpAMD64TESTB(v, config)
+ return rewriteValueAMD64_OpAMD64TESTB(v)
case OpAMD64TESTL:
- return rewriteValueAMD64_OpAMD64TESTL(v, config)
+ return rewriteValueAMD64_OpAMD64TESTL(v)
case OpAMD64TESTQ:
- return rewriteValueAMD64_OpAMD64TESTQ(v, config)
+ return rewriteValueAMD64_OpAMD64TESTQ(v)
case OpAMD64TESTW:
- return rewriteValueAMD64_OpAMD64TESTW(v, config)
+ return rewriteValueAMD64_OpAMD64TESTW(v)
case OpAMD64XADDLlock:
- return rewriteValueAMD64_OpAMD64XADDLlock(v, config)
+ return rewriteValueAMD64_OpAMD64XADDLlock(v)
case OpAMD64XADDQlock:
- return rewriteValueAMD64_OpAMD64XADDQlock(v, config)
+ return rewriteValueAMD64_OpAMD64XADDQlock(v)
case OpAMD64XCHGL:
- return rewriteValueAMD64_OpAMD64XCHGL(v, config)
+ return rewriteValueAMD64_OpAMD64XCHGL(v)
case OpAMD64XCHGQ:
- return rewriteValueAMD64_OpAMD64XCHGQ(v, config)
+ return rewriteValueAMD64_OpAMD64XCHGQ(v)
case OpAMD64XORL:
- return rewriteValueAMD64_OpAMD64XORL(v, config)
+ return rewriteValueAMD64_OpAMD64XORL(v)
case OpAMD64XORLconst:
- return rewriteValueAMD64_OpAMD64XORLconst(v, config)
+ return rewriteValueAMD64_OpAMD64XORLconst(v)
case OpAMD64XORQ:
- return rewriteValueAMD64_OpAMD64XORQ(v, config)
+ return rewriteValueAMD64_OpAMD64XORQ(v)
case OpAMD64XORQconst:
- return rewriteValueAMD64_OpAMD64XORQconst(v, config)
+ return rewriteValueAMD64_OpAMD64XORQconst(v)
case OpAdd16:
- return rewriteValueAMD64_OpAdd16(v, config)
+ return rewriteValueAMD64_OpAdd16(v)
case OpAdd32:
- return rewriteValueAMD64_OpAdd32(v, config)
+ return rewriteValueAMD64_OpAdd32(v)
case OpAdd32F:
- return rewriteValueAMD64_OpAdd32F(v, config)
+ return rewriteValueAMD64_OpAdd32F(v)
case OpAdd64:
- return rewriteValueAMD64_OpAdd64(v, config)
+ return rewriteValueAMD64_OpAdd64(v)
case OpAdd64F:
- return rewriteValueAMD64_OpAdd64F(v, config)
+ return rewriteValueAMD64_OpAdd64F(v)
case OpAdd8:
- return rewriteValueAMD64_OpAdd8(v, config)
+ return rewriteValueAMD64_OpAdd8(v)
case OpAddPtr:
- return rewriteValueAMD64_OpAddPtr(v, config)
+ return rewriteValueAMD64_OpAddPtr(v)
case OpAddr:
- return rewriteValueAMD64_OpAddr(v, config)
+ return rewriteValueAMD64_OpAddr(v)
case OpAnd16:
- return rewriteValueAMD64_OpAnd16(v, config)
+ return rewriteValueAMD64_OpAnd16(v)
case OpAnd32:
- return rewriteValueAMD64_OpAnd32(v, config)
+ return rewriteValueAMD64_OpAnd32(v)
case OpAnd64:
- return rewriteValueAMD64_OpAnd64(v, config)
+ return rewriteValueAMD64_OpAnd64(v)
case OpAnd8:
- return rewriteValueAMD64_OpAnd8(v, config)
+ return rewriteValueAMD64_OpAnd8(v)
case OpAndB:
- return rewriteValueAMD64_OpAndB(v, config)
+ return rewriteValueAMD64_OpAndB(v)
case OpAtomicAdd32:
- return rewriteValueAMD64_OpAtomicAdd32(v, config)
+ return rewriteValueAMD64_OpAtomicAdd32(v)
case OpAtomicAdd64:
- return rewriteValueAMD64_OpAtomicAdd64(v, config)
+ return rewriteValueAMD64_OpAtomicAdd64(v)
case OpAtomicAnd8:
- return rewriteValueAMD64_OpAtomicAnd8(v, config)
+ return rewriteValueAMD64_OpAtomicAnd8(v)
case OpAtomicCompareAndSwap32:
- return rewriteValueAMD64_OpAtomicCompareAndSwap32(v, config)
+ return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
case OpAtomicCompareAndSwap64:
- return rewriteValueAMD64_OpAtomicCompareAndSwap64(v, config)
+ return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
case OpAtomicExchange32:
- return rewriteValueAMD64_OpAtomicExchange32(v, config)
+ return rewriteValueAMD64_OpAtomicExchange32(v)
case OpAtomicExchange64:
- return rewriteValueAMD64_OpAtomicExchange64(v, config)
+ return rewriteValueAMD64_OpAtomicExchange64(v)
case OpAtomicLoad32:
- return rewriteValueAMD64_OpAtomicLoad32(v, config)
+ return rewriteValueAMD64_OpAtomicLoad32(v)
case OpAtomicLoad64:
- return rewriteValueAMD64_OpAtomicLoad64(v, config)
+ return rewriteValueAMD64_OpAtomicLoad64(v)
case OpAtomicLoadPtr:
- return rewriteValueAMD64_OpAtomicLoadPtr(v, config)
+ return rewriteValueAMD64_OpAtomicLoadPtr(v)
case OpAtomicOr8:
- return rewriteValueAMD64_OpAtomicOr8(v, config)
+ return rewriteValueAMD64_OpAtomicOr8(v)
case OpAtomicStore32:
- return rewriteValueAMD64_OpAtomicStore32(v, config)
+ return rewriteValueAMD64_OpAtomicStore32(v)
case OpAtomicStore64:
- return rewriteValueAMD64_OpAtomicStore64(v, config)
+ return rewriteValueAMD64_OpAtomicStore64(v)
case OpAtomicStorePtrNoWB:
- return rewriteValueAMD64_OpAtomicStorePtrNoWB(v, config)
+ return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
case OpAvg64u:
- return rewriteValueAMD64_OpAvg64u(v, config)
+ return rewriteValueAMD64_OpAvg64u(v)
case OpBitLen32:
- return rewriteValueAMD64_OpBitLen32(v, config)
+ return rewriteValueAMD64_OpBitLen32(v)
case OpBitLen64:
- return rewriteValueAMD64_OpBitLen64(v, config)
+ return rewriteValueAMD64_OpBitLen64(v)
case OpBswap32:
- return rewriteValueAMD64_OpBswap32(v, config)
+ return rewriteValueAMD64_OpBswap32(v)
case OpBswap64:
- return rewriteValueAMD64_OpBswap64(v, config)
+ return rewriteValueAMD64_OpBswap64(v)
case OpClosureCall:
- return rewriteValueAMD64_OpClosureCall(v, config)
+ return rewriteValueAMD64_OpClosureCall(v)
case OpCom16:
- return rewriteValueAMD64_OpCom16(v, config)
+ return rewriteValueAMD64_OpCom16(v)
case OpCom32:
- return rewriteValueAMD64_OpCom32(v, config)
+ return rewriteValueAMD64_OpCom32(v)
case OpCom64:
- return rewriteValueAMD64_OpCom64(v, config)
+ return rewriteValueAMD64_OpCom64(v)
case OpCom8:
- return rewriteValueAMD64_OpCom8(v, config)
+ return rewriteValueAMD64_OpCom8(v)
case OpConst16:
- return rewriteValueAMD64_OpConst16(v, config)
+ return rewriteValueAMD64_OpConst16(v)
case OpConst32:
- return rewriteValueAMD64_OpConst32(v, config)
+ return rewriteValueAMD64_OpConst32(v)
case OpConst32F:
- return rewriteValueAMD64_OpConst32F(v, config)
+ return rewriteValueAMD64_OpConst32F(v)
case OpConst64:
- return rewriteValueAMD64_OpConst64(v, config)
+ return rewriteValueAMD64_OpConst64(v)
case OpConst64F:
- return rewriteValueAMD64_OpConst64F(v, config)
+ return rewriteValueAMD64_OpConst64F(v)
case OpConst8:
- return rewriteValueAMD64_OpConst8(v, config)
+ return rewriteValueAMD64_OpConst8(v)
case OpConstBool:
- return rewriteValueAMD64_OpConstBool(v, config)
+ return rewriteValueAMD64_OpConstBool(v)
case OpConstNil:
- return rewriteValueAMD64_OpConstNil(v, config)
+ return rewriteValueAMD64_OpConstNil(v)
case OpConvert:
- return rewriteValueAMD64_OpConvert(v, config)
+ return rewriteValueAMD64_OpConvert(v)
case OpCtz32:
- return rewriteValueAMD64_OpCtz32(v, config)
+ return rewriteValueAMD64_OpCtz32(v)
case OpCtz64:
- return rewriteValueAMD64_OpCtz64(v, config)
+ return rewriteValueAMD64_OpCtz64(v)
case OpCvt32Fto32:
- return rewriteValueAMD64_OpCvt32Fto32(v, config)
+ return rewriteValueAMD64_OpCvt32Fto32(v)
case OpCvt32Fto64:
- return rewriteValueAMD64_OpCvt32Fto64(v, config)
+ return rewriteValueAMD64_OpCvt32Fto64(v)
case OpCvt32Fto64F:
- return rewriteValueAMD64_OpCvt32Fto64F(v, config)
+ return rewriteValueAMD64_OpCvt32Fto64F(v)
case OpCvt32to32F:
- return rewriteValueAMD64_OpCvt32to32F(v, config)
+ return rewriteValueAMD64_OpCvt32to32F(v)
case OpCvt32to64F:
- return rewriteValueAMD64_OpCvt32to64F(v, config)
+ return rewriteValueAMD64_OpCvt32to64F(v)
case OpCvt64Fto32:
- return rewriteValueAMD64_OpCvt64Fto32(v, config)
+ return rewriteValueAMD64_OpCvt64Fto32(v)
case OpCvt64Fto32F:
- return rewriteValueAMD64_OpCvt64Fto32F(v, config)
+ return rewriteValueAMD64_OpCvt64Fto32F(v)
case OpCvt64Fto64:
- return rewriteValueAMD64_OpCvt64Fto64(v, config)
+ return rewriteValueAMD64_OpCvt64Fto64(v)
case OpCvt64to32F:
- return rewriteValueAMD64_OpCvt64to32F(v, config)
+ return rewriteValueAMD64_OpCvt64to32F(v)
case OpCvt64to64F:
- return rewriteValueAMD64_OpCvt64to64F(v, config)
+ return rewriteValueAMD64_OpCvt64to64F(v)
case OpDiv128u:
- return rewriteValueAMD64_OpDiv128u(v, config)
+ return rewriteValueAMD64_OpDiv128u(v)
case OpDiv16:
- return rewriteValueAMD64_OpDiv16(v, config)
+ return rewriteValueAMD64_OpDiv16(v)
case OpDiv16u:
- return rewriteValueAMD64_OpDiv16u(v, config)
+ return rewriteValueAMD64_OpDiv16u(v)
case OpDiv32:
- return rewriteValueAMD64_OpDiv32(v, config)
+ return rewriteValueAMD64_OpDiv32(v)
case OpDiv32F:
- return rewriteValueAMD64_OpDiv32F(v, config)
+ return rewriteValueAMD64_OpDiv32F(v)
case OpDiv32u:
- return rewriteValueAMD64_OpDiv32u(v, config)
+ return rewriteValueAMD64_OpDiv32u(v)
case OpDiv64:
- return rewriteValueAMD64_OpDiv64(v, config)
+ return rewriteValueAMD64_OpDiv64(v)
case OpDiv64F:
- return rewriteValueAMD64_OpDiv64F(v, config)
+ return rewriteValueAMD64_OpDiv64F(v)
case OpDiv64u:
- return rewriteValueAMD64_OpDiv64u(v, config)
+ return rewriteValueAMD64_OpDiv64u(v)
case OpDiv8:
- return rewriteValueAMD64_OpDiv8(v, config)
+ return rewriteValueAMD64_OpDiv8(v)
case OpDiv8u:
- return rewriteValueAMD64_OpDiv8u(v, config)
+ return rewriteValueAMD64_OpDiv8u(v)
case OpEq16:
- return rewriteValueAMD64_OpEq16(v, config)
+ return rewriteValueAMD64_OpEq16(v)
case OpEq32:
- return rewriteValueAMD64_OpEq32(v, config)
+ return rewriteValueAMD64_OpEq32(v)
case OpEq32F:
- return rewriteValueAMD64_OpEq32F(v, config)
+ return rewriteValueAMD64_OpEq32F(v)
case OpEq64:
- return rewriteValueAMD64_OpEq64(v, config)
+ return rewriteValueAMD64_OpEq64(v)
case OpEq64F:
- return rewriteValueAMD64_OpEq64F(v, config)
+ return rewriteValueAMD64_OpEq64F(v)
case OpEq8:
- return rewriteValueAMD64_OpEq8(v, config)
+ return rewriteValueAMD64_OpEq8(v)
case OpEqB:
- return rewriteValueAMD64_OpEqB(v, config)
+ return rewriteValueAMD64_OpEqB(v)
case OpEqPtr:
- return rewriteValueAMD64_OpEqPtr(v, config)
+ return rewriteValueAMD64_OpEqPtr(v)
case OpGeq16:
- return rewriteValueAMD64_OpGeq16(v, config)
+ return rewriteValueAMD64_OpGeq16(v)
case OpGeq16U:
- return rewriteValueAMD64_OpGeq16U(v, config)
+ return rewriteValueAMD64_OpGeq16U(v)
case OpGeq32:
- return rewriteValueAMD64_OpGeq32(v, config)
+ return rewriteValueAMD64_OpGeq32(v)
case OpGeq32F:
- return rewriteValueAMD64_OpGeq32F(v, config)
+ return rewriteValueAMD64_OpGeq32F(v)
case OpGeq32U:
- return rewriteValueAMD64_OpGeq32U(v, config)
+ return rewriteValueAMD64_OpGeq32U(v)
case OpGeq64:
- return rewriteValueAMD64_OpGeq64(v, config)
+ return rewriteValueAMD64_OpGeq64(v)
case OpGeq64F:
- return rewriteValueAMD64_OpGeq64F(v, config)
+ return rewriteValueAMD64_OpGeq64F(v)
case OpGeq64U:
- return rewriteValueAMD64_OpGeq64U(v, config)
+ return rewriteValueAMD64_OpGeq64U(v)
case OpGeq8:
- return rewriteValueAMD64_OpGeq8(v, config)
+ return rewriteValueAMD64_OpGeq8(v)
case OpGeq8U:
- return rewriteValueAMD64_OpGeq8U(v, config)
+ return rewriteValueAMD64_OpGeq8U(v)
case OpGetClosurePtr:
- return rewriteValueAMD64_OpGetClosurePtr(v, config)
+ return rewriteValueAMD64_OpGetClosurePtr(v)
case OpGetG:
- return rewriteValueAMD64_OpGetG(v, config)
+ return rewriteValueAMD64_OpGetG(v)
case OpGreater16:
- return rewriteValueAMD64_OpGreater16(v, config)
+ return rewriteValueAMD64_OpGreater16(v)
case OpGreater16U:
- return rewriteValueAMD64_OpGreater16U(v, config)
+ return rewriteValueAMD64_OpGreater16U(v)
case OpGreater32:
- return rewriteValueAMD64_OpGreater32(v, config)
+ return rewriteValueAMD64_OpGreater32(v)
case OpGreater32F:
- return rewriteValueAMD64_OpGreater32F(v, config)
+ return rewriteValueAMD64_OpGreater32F(v)
case OpGreater32U:
- return rewriteValueAMD64_OpGreater32U(v, config)
+ return rewriteValueAMD64_OpGreater32U(v)
case OpGreater64:
- return rewriteValueAMD64_OpGreater64(v, config)
+ return rewriteValueAMD64_OpGreater64(v)
case OpGreater64F:
- return rewriteValueAMD64_OpGreater64F(v, config)
+ return rewriteValueAMD64_OpGreater64F(v)
case OpGreater64U:
- return rewriteValueAMD64_OpGreater64U(v, config)
+ return rewriteValueAMD64_OpGreater64U(v)
case OpGreater8:
- return rewriteValueAMD64_OpGreater8(v, config)
+ return rewriteValueAMD64_OpGreater8(v)
case OpGreater8U:
- return rewriteValueAMD64_OpGreater8U(v, config)
+ return rewriteValueAMD64_OpGreater8U(v)
case OpHmul32:
- return rewriteValueAMD64_OpHmul32(v, config)
+ return rewriteValueAMD64_OpHmul32(v)
case OpHmul32u:
- return rewriteValueAMD64_OpHmul32u(v, config)
+ return rewriteValueAMD64_OpHmul32u(v)
case OpHmul64:
- return rewriteValueAMD64_OpHmul64(v, config)
+ return rewriteValueAMD64_OpHmul64(v)
case OpHmul64u:
- return rewriteValueAMD64_OpHmul64u(v, config)
+ return rewriteValueAMD64_OpHmul64u(v)
case OpInt64Hi:
- return rewriteValueAMD64_OpInt64Hi(v, config)
+ return rewriteValueAMD64_OpInt64Hi(v)
case OpInterCall:
- return rewriteValueAMD64_OpInterCall(v, config)
+ return rewriteValueAMD64_OpInterCall(v)
case OpIsInBounds:
- return rewriteValueAMD64_OpIsInBounds(v, config)
+ return rewriteValueAMD64_OpIsInBounds(v)
case OpIsNonNil:
- return rewriteValueAMD64_OpIsNonNil(v, config)
+ return rewriteValueAMD64_OpIsNonNil(v)
case OpIsSliceInBounds:
- return rewriteValueAMD64_OpIsSliceInBounds(v, config)
+ return rewriteValueAMD64_OpIsSliceInBounds(v)
case OpLeq16:
- return rewriteValueAMD64_OpLeq16(v, config)
+ return rewriteValueAMD64_OpLeq16(v)
case OpLeq16U:
- return rewriteValueAMD64_OpLeq16U(v, config)
+ return rewriteValueAMD64_OpLeq16U(v)
case OpLeq32:
- return rewriteValueAMD64_OpLeq32(v, config)
+ return rewriteValueAMD64_OpLeq32(v)
case OpLeq32F:
- return rewriteValueAMD64_OpLeq32F(v, config)
+ return rewriteValueAMD64_OpLeq32F(v)
case OpLeq32U:
- return rewriteValueAMD64_OpLeq32U(v, config)
+ return rewriteValueAMD64_OpLeq32U(v)
case OpLeq64:
- return rewriteValueAMD64_OpLeq64(v, config)
+ return rewriteValueAMD64_OpLeq64(v)
case OpLeq64F:
- return rewriteValueAMD64_OpLeq64F(v, config)
+ return rewriteValueAMD64_OpLeq64F(v)
case OpLeq64U:
- return rewriteValueAMD64_OpLeq64U(v, config)
+ return rewriteValueAMD64_OpLeq64U(v)
case OpLeq8:
- return rewriteValueAMD64_OpLeq8(v, config)
+ return rewriteValueAMD64_OpLeq8(v)
case OpLeq8U:
- return rewriteValueAMD64_OpLeq8U(v, config)
+ return rewriteValueAMD64_OpLeq8U(v)
case OpLess16:
- return rewriteValueAMD64_OpLess16(v, config)
+ return rewriteValueAMD64_OpLess16(v)
case OpLess16U:
- return rewriteValueAMD64_OpLess16U(v, config)
+ return rewriteValueAMD64_OpLess16U(v)
case OpLess32:
- return rewriteValueAMD64_OpLess32(v, config)
+ return rewriteValueAMD64_OpLess32(v)
case OpLess32F:
- return rewriteValueAMD64_OpLess32F(v, config)
+ return rewriteValueAMD64_OpLess32F(v)
case OpLess32U:
- return rewriteValueAMD64_OpLess32U(v, config)
+ return rewriteValueAMD64_OpLess32U(v)
case OpLess64:
- return rewriteValueAMD64_OpLess64(v, config)
+ return rewriteValueAMD64_OpLess64(v)
case OpLess64F:
- return rewriteValueAMD64_OpLess64F(v, config)
+ return rewriteValueAMD64_OpLess64F(v)
case OpLess64U:
- return rewriteValueAMD64_OpLess64U(v, config)
+ return rewriteValueAMD64_OpLess64U(v)
case OpLess8:
- return rewriteValueAMD64_OpLess8(v, config)
+ return rewriteValueAMD64_OpLess8(v)
case OpLess8U:
- return rewriteValueAMD64_OpLess8U(v, config)
+ return rewriteValueAMD64_OpLess8U(v)
case OpLoad:
- return rewriteValueAMD64_OpLoad(v, config)
+ return rewriteValueAMD64_OpLoad(v)
case OpLsh16x16:
- return rewriteValueAMD64_OpLsh16x16(v, config)
+ return rewriteValueAMD64_OpLsh16x16(v)
case OpLsh16x32:
- return rewriteValueAMD64_OpLsh16x32(v, config)
+ return rewriteValueAMD64_OpLsh16x32(v)
case OpLsh16x64:
- return rewriteValueAMD64_OpLsh16x64(v, config)
+ return rewriteValueAMD64_OpLsh16x64(v)
case OpLsh16x8:
- return rewriteValueAMD64_OpLsh16x8(v, config)
+ return rewriteValueAMD64_OpLsh16x8(v)
case OpLsh32x16:
- return rewriteValueAMD64_OpLsh32x16(v, config)
+ return rewriteValueAMD64_OpLsh32x16(v)
case OpLsh32x32:
- return rewriteValueAMD64_OpLsh32x32(v, config)
+ return rewriteValueAMD64_OpLsh32x32(v)
case OpLsh32x64:
- return rewriteValueAMD64_OpLsh32x64(v, config)
+ return rewriteValueAMD64_OpLsh32x64(v)
case OpLsh32x8:
- return rewriteValueAMD64_OpLsh32x8(v, config)
+ return rewriteValueAMD64_OpLsh32x8(v)
case OpLsh64x16:
- return rewriteValueAMD64_OpLsh64x16(v, config)
+ return rewriteValueAMD64_OpLsh64x16(v)
case OpLsh64x32:
- return rewriteValueAMD64_OpLsh64x32(v, config)
+ return rewriteValueAMD64_OpLsh64x32(v)
case OpLsh64x64:
- return rewriteValueAMD64_OpLsh64x64(v, config)
+ return rewriteValueAMD64_OpLsh64x64(v)
case OpLsh64x8:
- return rewriteValueAMD64_OpLsh64x8(v, config)
+ return rewriteValueAMD64_OpLsh64x8(v)
case OpLsh8x16:
- return rewriteValueAMD64_OpLsh8x16(v, config)
+ return rewriteValueAMD64_OpLsh8x16(v)
case OpLsh8x32:
- return rewriteValueAMD64_OpLsh8x32(v, config)
+ return rewriteValueAMD64_OpLsh8x32(v)
case OpLsh8x64:
- return rewriteValueAMD64_OpLsh8x64(v, config)
+ return rewriteValueAMD64_OpLsh8x64(v)
case OpLsh8x8:
- return rewriteValueAMD64_OpLsh8x8(v, config)
+ return rewriteValueAMD64_OpLsh8x8(v)
case OpMod16:
- return rewriteValueAMD64_OpMod16(v, config)
+ return rewriteValueAMD64_OpMod16(v)
case OpMod16u:
- return rewriteValueAMD64_OpMod16u(v, config)
+ return rewriteValueAMD64_OpMod16u(v)
case OpMod32:
- return rewriteValueAMD64_OpMod32(v, config)
+ return rewriteValueAMD64_OpMod32(v)
case OpMod32u:
- return rewriteValueAMD64_OpMod32u(v, config)
+ return rewriteValueAMD64_OpMod32u(v)
case OpMod64:
- return rewriteValueAMD64_OpMod64(v, config)
+ return rewriteValueAMD64_OpMod64(v)
case OpMod64u:
- return rewriteValueAMD64_OpMod64u(v, config)
+ return rewriteValueAMD64_OpMod64u(v)
case OpMod8:
- return rewriteValueAMD64_OpMod8(v, config)
+ return rewriteValueAMD64_OpMod8(v)
case OpMod8u:
- return rewriteValueAMD64_OpMod8u(v, config)
+ return rewriteValueAMD64_OpMod8u(v)
case OpMove:
- return rewriteValueAMD64_OpMove(v, config)
+ return rewriteValueAMD64_OpMove(v)
case OpMul16:
- return rewriteValueAMD64_OpMul16(v, config)
+ return rewriteValueAMD64_OpMul16(v)
case OpMul32:
- return rewriteValueAMD64_OpMul32(v, config)
+ return rewriteValueAMD64_OpMul32(v)
case OpMul32F:
- return rewriteValueAMD64_OpMul32F(v, config)
+ return rewriteValueAMD64_OpMul32F(v)
case OpMul64:
- return rewriteValueAMD64_OpMul64(v, config)
+ return rewriteValueAMD64_OpMul64(v)
case OpMul64F:
- return rewriteValueAMD64_OpMul64F(v, config)
+ return rewriteValueAMD64_OpMul64F(v)
case OpMul64uhilo:
- return rewriteValueAMD64_OpMul64uhilo(v, config)
+ return rewriteValueAMD64_OpMul64uhilo(v)
case OpMul8:
- return rewriteValueAMD64_OpMul8(v, config)
+ return rewriteValueAMD64_OpMul8(v)
case OpNeg16:
- return rewriteValueAMD64_OpNeg16(v, config)
+ return rewriteValueAMD64_OpNeg16(v)
case OpNeg32:
- return rewriteValueAMD64_OpNeg32(v, config)
+ return rewriteValueAMD64_OpNeg32(v)
case OpNeg32F:
- return rewriteValueAMD64_OpNeg32F(v, config)
+ return rewriteValueAMD64_OpNeg32F(v)
case OpNeg64:
- return rewriteValueAMD64_OpNeg64(v, config)
+ return rewriteValueAMD64_OpNeg64(v)
case OpNeg64F:
- return rewriteValueAMD64_OpNeg64F(v, config)
+ return rewriteValueAMD64_OpNeg64F(v)
case OpNeg8:
- return rewriteValueAMD64_OpNeg8(v, config)
+ return rewriteValueAMD64_OpNeg8(v)
case OpNeq16:
- return rewriteValueAMD64_OpNeq16(v, config)
+ return rewriteValueAMD64_OpNeq16(v)
case OpNeq32:
- return rewriteValueAMD64_OpNeq32(v, config)
+ return rewriteValueAMD64_OpNeq32(v)
case OpNeq32F:
- return rewriteValueAMD64_OpNeq32F(v, config)
+ return rewriteValueAMD64_OpNeq32F(v)
case OpNeq64:
- return rewriteValueAMD64_OpNeq64(v, config)
+ return rewriteValueAMD64_OpNeq64(v)
case OpNeq64F:
- return rewriteValueAMD64_OpNeq64F(v, config)
+ return rewriteValueAMD64_OpNeq64F(v)
case OpNeq8:
- return rewriteValueAMD64_OpNeq8(v, config)
+ return rewriteValueAMD64_OpNeq8(v)
case OpNeqB:
- return rewriteValueAMD64_OpNeqB(v, config)
+ return rewriteValueAMD64_OpNeqB(v)
case OpNeqPtr:
- return rewriteValueAMD64_OpNeqPtr(v, config)
+ return rewriteValueAMD64_OpNeqPtr(v)
case OpNilCheck:
- return rewriteValueAMD64_OpNilCheck(v, config)
+ return rewriteValueAMD64_OpNilCheck(v)
case OpNot:
- return rewriteValueAMD64_OpNot(v, config)
+ return rewriteValueAMD64_OpNot(v)
case OpOffPtr:
- return rewriteValueAMD64_OpOffPtr(v, config)
+ return rewriteValueAMD64_OpOffPtr(v)
case OpOr16:
- return rewriteValueAMD64_OpOr16(v, config)
+ return rewriteValueAMD64_OpOr16(v)
case OpOr32:
- return rewriteValueAMD64_OpOr32(v, config)
+ return rewriteValueAMD64_OpOr32(v)
case OpOr64:
- return rewriteValueAMD64_OpOr64(v, config)
+ return rewriteValueAMD64_OpOr64(v)
case OpOr8:
- return rewriteValueAMD64_OpOr8(v, config)
+ return rewriteValueAMD64_OpOr8(v)
case OpOrB:
- return rewriteValueAMD64_OpOrB(v, config)
+ return rewriteValueAMD64_OpOrB(v)
case OpRound32F:
- return rewriteValueAMD64_OpRound32F(v, config)
+ return rewriteValueAMD64_OpRound32F(v)
case OpRound64F:
- return rewriteValueAMD64_OpRound64F(v, config)
+ return rewriteValueAMD64_OpRound64F(v)
case OpRsh16Ux16:
- return rewriteValueAMD64_OpRsh16Ux16(v, config)
+ return rewriteValueAMD64_OpRsh16Ux16(v)
case OpRsh16Ux32:
- return rewriteValueAMD64_OpRsh16Ux32(v, config)
+ return rewriteValueAMD64_OpRsh16Ux32(v)
case OpRsh16Ux64:
- return rewriteValueAMD64_OpRsh16Ux64(v, config)
+ return rewriteValueAMD64_OpRsh16Ux64(v)
case OpRsh16Ux8:
- return rewriteValueAMD64_OpRsh16Ux8(v, config)
+ return rewriteValueAMD64_OpRsh16Ux8(v)
case OpRsh16x16:
- return rewriteValueAMD64_OpRsh16x16(v, config)
+ return rewriteValueAMD64_OpRsh16x16(v)
case OpRsh16x32:
- return rewriteValueAMD64_OpRsh16x32(v, config)
+ return rewriteValueAMD64_OpRsh16x32(v)
case OpRsh16x64:
- return rewriteValueAMD64_OpRsh16x64(v, config)
+ return rewriteValueAMD64_OpRsh16x64(v)
case OpRsh16x8:
- return rewriteValueAMD64_OpRsh16x8(v, config)
+ return rewriteValueAMD64_OpRsh16x8(v)
case OpRsh32Ux16:
- return rewriteValueAMD64_OpRsh32Ux16(v, config)
+ return rewriteValueAMD64_OpRsh32Ux16(v)
case OpRsh32Ux32:
- return rewriteValueAMD64_OpRsh32Ux32(v, config)
+ return rewriteValueAMD64_OpRsh32Ux32(v)
case OpRsh32Ux64:
- return rewriteValueAMD64_OpRsh32Ux64(v, config)
+ return rewriteValueAMD64_OpRsh32Ux64(v)
case OpRsh32Ux8:
- return rewriteValueAMD64_OpRsh32Ux8(v, config)
+ return rewriteValueAMD64_OpRsh32Ux8(v)
case OpRsh32x16:
- return rewriteValueAMD64_OpRsh32x16(v, config)
+ return rewriteValueAMD64_OpRsh32x16(v)
case OpRsh32x32:
- return rewriteValueAMD64_OpRsh32x32(v, config)
+ return rewriteValueAMD64_OpRsh32x32(v)
case OpRsh32x64:
- return rewriteValueAMD64_OpRsh32x64(v, config)
+ return rewriteValueAMD64_OpRsh32x64(v)
case OpRsh32x8:
- return rewriteValueAMD64_OpRsh32x8(v, config)
+ return rewriteValueAMD64_OpRsh32x8(v)
case OpRsh64Ux16:
- return rewriteValueAMD64_OpRsh64Ux16(v, config)
+ return rewriteValueAMD64_OpRsh64Ux16(v)
case OpRsh64Ux32:
- return rewriteValueAMD64_OpRsh64Ux32(v, config)
+ return rewriteValueAMD64_OpRsh64Ux32(v)
case OpRsh64Ux64:
- return rewriteValueAMD64_OpRsh64Ux64(v, config)
+ return rewriteValueAMD64_OpRsh64Ux64(v)
case OpRsh64Ux8:
- return rewriteValueAMD64_OpRsh64Ux8(v, config)
+ return rewriteValueAMD64_OpRsh64Ux8(v)
case OpRsh64x16:
- return rewriteValueAMD64_OpRsh64x16(v, config)
+ return rewriteValueAMD64_OpRsh64x16(v)
case OpRsh64x32:
- return rewriteValueAMD64_OpRsh64x32(v, config)
+ return rewriteValueAMD64_OpRsh64x32(v)
case OpRsh64x64:
- return rewriteValueAMD64_OpRsh64x64(v, config)
+ return rewriteValueAMD64_OpRsh64x64(v)
case OpRsh64x8:
- return rewriteValueAMD64_OpRsh64x8(v, config)
+ return rewriteValueAMD64_OpRsh64x8(v)
case OpRsh8Ux16:
- return rewriteValueAMD64_OpRsh8Ux16(v, config)
+ return rewriteValueAMD64_OpRsh8Ux16(v)
case OpRsh8Ux32:
- return rewriteValueAMD64_OpRsh8Ux32(v, config)
+ return rewriteValueAMD64_OpRsh8Ux32(v)
case OpRsh8Ux64:
- return rewriteValueAMD64_OpRsh8Ux64(v, config)
+ return rewriteValueAMD64_OpRsh8Ux64(v)
case OpRsh8Ux8:
- return rewriteValueAMD64_OpRsh8Ux8(v, config)
+ return rewriteValueAMD64_OpRsh8Ux8(v)
case OpRsh8x16:
- return rewriteValueAMD64_OpRsh8x16(v, config)
+ return rewriteValueAMD64_OpRsh8x16(v)
case OpRsh8x32:
- return rewriteValueAMD64_OpRsh8x32(v, config)
+ return rewriteValueAMD64_OpRsh8x32(v)
case OpRsh8x64:
- return rewriteValueAMD64_OpRsh8x64(v, config)
+ return rewriteValueAMD64_OpRsh8x64(v)
case OpRsh8x8:
- return rewriteValueAMD64_OpRsh8x8(v, config)
+ return rewriteValueAMD64_OpRsh8x8(v)
case OpSelect0:
- return rewriteValueAMD64_OpSelect0(v, config)
+ return rewriteValueAMD64_OpSelect0(v)
case OpSelect1:
- return rewriteValueAMD64_OpSelect1(v, config)
+ return rewriteValueAMD64_OpSelect1(v)
case OpSignExt16to32:
- return rewriteValueAMD64_OpSignExt16to32(v, config)
+ return rewriteValueAMD64_OpSignExt16to32(v)
case OpSignExt16to64:
- return rewriteValueAMD64_OpSignExt16to64(v, config)
+ return rewriteValueAMD64_OpSignExt16to64(v)
case OpSignExt32to64:
- return rewriteValueAMD64_OpSignExt32to64(v, config)
+ return rewriteValueAMD64_OpSignExt32to64(v)
case OpSignExt8to16:
- return rewriteValueAMD64_OpSignExt8to16(v, config)
+ return rewriteValueAMD64_OpSignExt8to16(v)
case OpSignExt8to32:
- return rewriteValueAMD64_OpSignExt8to32(v, config)
+ return rewriteValueAMD64_OpSignExt8to32(v)
case OpSignExt8to64:
- return rewriteValueAMD64_OpSignExt8to64(v, config)
+ return rewriteValueAMD64_OpSignExt8to64(v)
case OpSlicemask:
- return rewriteValueAMD64_OpSlicemask(v, config)
+ return rewriteValueAMD64_OpSlicemask(v)
case OpSqrt:
- return rewriteValueAMD64_OpSqrt(v, config)
+ return rewriteValueAMD64_OpSqrt(v)
case OpStaticCall:
- return rewriteValueAMD64_OpStaticCall(v, config)
+ return rewriteValueAMD64_OpStaticCall(v)
case OpStore:
- return rewriteValueAMD64_OpStore(v, config)
+ return rewriteValueAMD64_OpStore(v)
case OpSub16:
- return rewriteValueAMD64_OpSub16(v, config)
+ return rewriteValueAMD64_OpSub16(v)
case OpSub32:
- return rewriteValueAMD64_OpSub32(v, config)
+ return rewriteValueAMD64_OpSub32(v)
case OpSub32F:
- return rewriteValueAMD64_OpSub32F(v, config)
+ return rewriteValueAMD64_OpSub32F(v)
case OpSub64:
- return rewriteValueAMD64_OpSub64(v, config)
+ return rewriteValueAMD64_OpSub64(v)
case OpSub64F:
- return rewriteValueAMD64_OpSub64F(v, config)
+ return rewriteValueAMD64_OpSub64F(v)
case OpSub8:
- return rewriteValueAMD64_OpSub8(v, config)
+ return rewriteValueAMD64_OpSub8(v)
case OpSubPtr:
- return rewriteValueAMD64_OpSubPtr(v, config)
+ return rewriteValueAMD64_OpSubPtr(v)
case OpTrunc16to8:
- return rewriteValueAMD64_OpTrunc16to8(v, config)
+ return rewriteValueAMD64_OpTrunc16to8(v)
case OpTrunc32to16:
- return rewriteValueAMD64_OpTrunc32to16(v, config)
+ return rewriteValueAMD64_OpTrunc32to16(v)
case OpTrunc32to8:
- return rewriteValueAMD64_OpTrunc32to8(v, config)
+ return rewriteValueAMD64_OpTrunc32to8(v)
case OpTrunc64to16:
- return rewriteValueAMD64_OpTrunc64to16(v, config)
+ return rewriteValueAMD64_OpTrunc64to16(v)
case OpTrunc64to32:
- return rewriteValueAMD64_OpTrunc64to32(v, config)
+ return rewriteValueAMD64_OpTrunc64to32(v)
case OpTrunc64to8:
- return rewriteValueAMD64_OpTrunc64to8(v, config)
+ return rewriteValueAMD64_OpTrunc64to8(v)
case OpXor16:
- return rewriteValueAMD64_OpXor16(v, config)
+ return rewriteValueAMD64_OpXor16(v)
case OpXor32:
- return rewriteValueAMD64_OpXor32(v, config)
+ return rewriteValueAMD64_OpXor32(v)
case OpXor64:
- return rewriteValueAMD64_OpXor64(v, config)
+ return rewriteValueAMD64_OpXor64(v)
case OpXor8:
- return rewriteValueAMD64_OpXor8(v, config)
+ return rewriteValueAMD64_OpXor8(v)
case OpZero:
- return rewriteValueAMD64_OpZero(v, config)
+ return rewriteValueAMD64_OpZero(v)
case OpZeroExt16to32:
- return rewriteValueAMD64_OpZeroExt16to32(v, config)
+ return rewriteValueAMD64_OpZeroExt16to32(v)
case OpZeroExt16to64:
- return rewriteValueAMD64_OpZeroExt16to64(v, config)
+ return rewriteValueAMD64_OpZeroExt16to64(v)
case OpZeroExt32to64:
- return rewriteValueAMD64_OpZeroExt32to64(v, config)
+ return rewriteValueAMD64_OpZeroExt32to64(v)
case OpZeroExt8to16:
- return rewriteValueAMD64_OpZeroExt8to16(v, config)
+ return rewriteValueAMD64_OpZeroExt8to16(v)
case OpZeroExt8to32:
- return rewriteValueAMD64_OpZeroExt8to32(v, config)
+ return rewriteValueAMD64_OpZeroExt8to32(v)
case OpZeroExt8to64:
- return rewriteValueAMD64_OpZeroExt8to64(v, config)
+ return rewriteValueAMD64_OpZeroExt8to64(v)
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
// match: (ADDL x (MOVLconst [c]))
// cond:
// result: (ADDLconst [c] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
// match: (ADDLconst [c] x)
// cond: int32(c)==0
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
// match: (ADDQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (ADDQconst [c] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
// match: (ADDQconst [c] (ADDQ x y))
// cond:
// result: (LEAQ1 [c] x y)
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDSD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: l.Uses == 1 && canMergeLoad(v, l) && clobber(l)
// result: (ADDSDmem x [off] {sym} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64ADDSS(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: l.Uses == 1 && canMergeLoad(v, l) && clobber(l)
// result: (ADDSSmem x [off] {sym} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
// match: (ANDL x (MOVLconst [c]))
// cond:
// result: (ANDLconst [c] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
// match: (ANDLconst [c] (ANDLconst [d] x))
// cond:
// result: (ANDLconst [c & d] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
// match: (ANDQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (ANDQconst [c] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
// match: (ANDQconst [c] (ANDQconst [d] x))
// cond:
// result: (ANDQconst [c & d] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64BSFQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
b := v.Block
_ = b
// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
}
return false
}
-func rewriteValueAMD64_OpAMD64BTQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
// match: (BTQconst [c] x)
// cond: c < 32
// result: (BTLconst [c] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
// cond: c != 0
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
b := v.Block
_ = b
// match: (CMPB x (MOVLconst [c]))
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
// match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)==int8(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
b := v.Block
_ = b
// match: (CMPL x (MOVLconst [c]))
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
// match: (CMPLconst (MOVLconst [x]) [y])
// cond: int32(x)==int32(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
b := v.Block
_ = b
// match: (CMPQ x (MOVQconst [c]))
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
// match: (CMPQconst (MOVQconst [x]) [y])
// cond: x==y
// result: (FlagEQ)
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
b := v.Block
_ = b
// match: (CMPW x (MOVLconst [c]))
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
// match: (CMPWconst (MOVLconst [x]) [y])
// cond: int16(x)==int16(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
// cond: is32Bit(off1+off2)
// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
// cond: is32Bit(off1+off2)
// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64LEAL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
// match: (LEAL [c] {s} (ADDLconst [d] x))
// cond: is32Bit(c+d)
// result: (LEAL [c+d] {s} x)
}
return false
}
-func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
// match: (LEAQ [c] {s} (ADDQconst [d] x))
// cond: is32Bit(c+d)
// result: (LEAQ [c+d] {s} x)
}
return false
}
-func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
// cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAQ1 [c+d] {s} x y)
}
return false
}
-func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
// cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAQ2 [c+d] {s} x y)
}
return false
}
-func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
// cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAQ4 [c+d] {s} x y)
}
return false
}
-func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
// cond: is32Bit(c+d) && x.Op != OpSB
// result: (LEAQ8 [c+d] {s} x y)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
b := v.Block
_ = b
// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
b := v.Block
_ = b
// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool {
// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
// cond:
// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
b := v.Block
_ = b
// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value) bool {
// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
// cond:
// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool {
b := v.Block
_ = b
// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
b := v.Block
_ = b
// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
b := v.Block
_ = b
// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool {
// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
// cond:
// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool {
// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
// cond:
// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
// cond:
// result: (MOVLstore [off] {sym} ptr x mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
v.AuxInt = ValAndOff(a).Off()
v.Aux = s
v.AddArg(p)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
v.AddArg(v0)
v.AddArg(mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
// cond:
// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
v.Aux = s
v.AddArg(p)
v.AddArg(i)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
v.AddArg(v0)
v.AddArg(mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
// cond:
// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
v0.AuxInt = 2
v0.AddArg(i)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
v.AddArg(v1)
v.AddArg(mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool {
// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
// cond:
// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool {
b := v.Block
_ = b
// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVOload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVOstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool {
// match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
// cond:
// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool {
// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
// cond:
// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVQstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool {
// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
// cond:
// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value) bool {
// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
// cond:
// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool {
// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
// cond:
// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool {
// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
// cond:
// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVSDload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool {
// match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
// cond:
// result: (MOVSDloadidx8 [c] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool {
// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
// cond:
// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool {
// match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
// cond:
// result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool {
// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
// cond:
// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVSSload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool {
// match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
// cond:
// result: (MOVSSloadidx4 [c] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool {
// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
// cond:
// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool {
// match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
// cond:
// result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool {
// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
// cond:
// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
b := v.Block
_ = b
// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
b := v.Block
_ = b
// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool {
// match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
// cond:
// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool {
// match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
// cond:
// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
// cond:
// result: (MOVWstore [off] {sym} ptr x mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool {
// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
// cond:
// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool {
b := v.Block
_ = b
// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool {
// match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
// cond:
// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool {
b := v.Block
_ = b
// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
// match: (MULL x (MOVLconst [c]))
// cond:
// result: (MULLconst [c] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
// match: (MULLconst [c] (MULLconst [d] x))
// cond:
// result: (MULLconst [int64(int32(c * d))] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
// match: (MULQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (MULQconst [c] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
b := v.Block
_ = b
// match: (MULQconst [c] (MULQconst [d] x))
}
return false
}
-func rewriteValueAMD64_OpAMD64MULSD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: l.Uses == 1 && canMergeLoad(v, l) && clobber(l)
// result: (MULSDmem x [off] {sym} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64MULSS(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: l.Uses == 1 && canMergeLoad(v, l) && clobber(l)
// result: (MULSSmem x [off] {sym} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
// match: (NEGL (MOVLconst [c]))
// cond:
// result: (MOVLconst [int64(int32(-c))])
}
return false
}
-func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
// match: (NEGQ (MOVQconst [c]))
// cond:
// result: (MOVQconst [-c])
}
return false
}
-func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
// match: (NOTL (MOVLconst [c]))
// cond:
// result: (MOVLconst [^c])
}
return false
}
-func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
// match: (NOTQ (MOVQconst [c]))
// cond:
// result: (MOVQconst [^c])
}
return false
}
-func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (ORL x (MOVLconst [c]))
// cond:
// result: (ORLconst [c] x)
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, fe.TypeUInt16())
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i
break
}
b = mergePoint(b, x0, x1, x2)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
- v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, fe.TypeUInt16())
v1.AuxInt = i - 1
v1.Aux = s
v1.AddArg(p)
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v1.AuxInt = i1 - 2
v1.Aux = s
v1.AddArg(p)
}
return false
}
-func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
// match: (ORLconst [c] x)
// cond: int32(c)==0
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (ORQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (ORQconst [c] x)
break
}
b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, fe.TypeUInt64())
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, fe.TypeUInt64())
v1.AuxInt = i - 7
v1.Aux = s
v1.AddArg(p)
}
return false
}
-func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
// match: (ORQconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
// match: (ROLBconst [c] (ROLBconst [d] x))
// cond:
// result: (ROLBconst [(c+d)& 7] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
// match: (ROLLconst [c] (ROLLconst [d] x))
// cond:
// result: (ROLLconst [(c+d)&31] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
// match: (ROLQconst [c] (ROLQconst [d] x))
// cond:
// result: (ROLQconst [(c+d)&63] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
// match: (ROLWconst [c] (ROLWconst [d] x))
// cond:
// result: (ROLWconst [(c+d)&15] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
// match: (SARB x (MOVQconst [c]))
// cond:
// result: (SARBconst [min(c&31,7)] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
// match: (SARBconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
// match: (SARL x (MOVQconst [c]))
// cond:
// result: (SARLconst [c&31] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
// match: (SARLconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
// match: (SARQ x (MOVQconst [c]))
// cond:
// result: (SARQconst [c&63] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
// match: (SARQconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
// match: (SARW x (MOVQconst [c]))
// cond:
// result: (SARWconst [min(c&31,15)] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
// match: (SARWconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
// match: (SBBLcarrymask (FlagEQ))
// cond:
// result: (MOVLconst [0])
}
return false
}
-func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
// match: (SBBQcarrymask (FlagEQ))
// cond:
// result: (MOVQconst [0])
}
return false
}
-func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
// match: (SETA (InvertFlags x))
// cond:
// result: (SETB x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
// match: (SETAE (InvertFlags x))
// cond:
// result: (SETBE x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
// match: (SETB (InvertFlags x))
// cond:
// result: (SETA x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
// match: (SETBE (InvertFlags x))
// cond:
// result: (SETAE x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
// cond: !config.nacl
// result: (SETAE (BTL x y))
}
return false
}
-func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
// match: (SETG (InvertFlags x))
// cond:
// result: (SETL x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
// match: (SETGE (InvertFlags x))
// cond:
// result: (SETLE x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
// match: (SETL (InvertFlags x))
// cond:
// result: (SETG x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
// match: (SETLE (InvertFlags x))
// cond:
// result: (SETGE x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
// cond: !config.nacl
// result: (SETB (BTL x y))
}
return false
}
-func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
// match: (SHLL x (MOVQconst [c]))
// cond:
// result: (SHLLconst [c&31] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SHLLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
// match: (SHLLconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
// match: (SHLQ x (MOVQconst [c]))
// cond:
// result: (SHLQconst [c&63] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SHLQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
// match: (SHLQconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
// match: (SHRB x (MOVQconst [c]))
// cond: c&31 < 8
// result: (SHRBconst [c&31] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
// match: (SHRBconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
// match: (SHRL x (MOVQconst [c]))
// cond:
// result: (SHRLconst [c&31] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
// match: (SHRLconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
// match: (SHRQ x (MOVQconst [c]))
// cond:
// result: (SHRQconst [c&63] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
// match: (SHRQconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
// match: (SHRW x (MOVQconst [c]))
// cond: c&31 < 16
// result: (SHRWconst [c&31] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
// match: (SHRWconst x [0])
// cond:
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
b := v.Block
_ = b
// match: (SUBL x (MOVLconst [c]))
}
return false
}
-func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
// match: (SUBLconst [c] x)
// cond: int32(c) == 0
// result: x
return true
}
}
-func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
b := v.Block
_ = b
// match: (SUBQ x (MOVQconst [c]))
}
return false
}
-func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
// match: (SUBQconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueAMD64_OpAMD64SUBSD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: l.Uses == 1 && canMergeLoad(v, l) && clobber(l)
// result: (SUBSDmem x [off] {sym} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64SUBSS(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: l.Uses == 1 && canMergeLoad(v, l) && clobber(l)
// result: (SUBSSmem x [off] {sym} ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64TESTB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
// match: (TESTB (MOVLconst [c]) x)
// cond:
// result: (TESTBconst [c] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64TESTL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
// match: (TESTL y x:(SHLL _ _))
// cond: y.Op != OpAMD64SHLL
// result: (TESTL x y)
}
return false
}
-func rewriteValueAMD64_OpAMD64TESTQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
// match: (TESTQ y x:(SHLQ _ _))
// cond: y.Op != OpAMD64SHLQ
// result: (TESTQ x y)
}
return false
}
-func rewriteValueAMD64_OpAMD64TESTW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
// match: (TESTW (MOVLconst [c]) x)
// cond:
// result: (TESTWconst [c] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64XADDLlock(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (XADDLlock [off1+off2] {sym} val ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64XADDQlock(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (XADDQlock [off1+off2] {sym} val ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64XCHGL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (XCHGL [off1+off2] {sym} val ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64XCHGQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (XCHGQ [off1+off2] {sym} val ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
// match: (XORL x (MOVLconst [c]))
// cond:
// result: (XORLconst [c] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
// match: (XORLconst [c] (XORLconst [d] x))
// cond:
// result: (XORLconst [c ^ d] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
// match: (XORQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (XORQconst [c] x)
}
return false
}
-func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
// match: (XORQconst [c] (XORQconst [d] x))
// cond:
// result: (XORQconst [c ^ d] x)
}
return false
}
-func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAdd16(v *Value) bool {
// match: (Add16 x y)
// cond:
// result: (ADDL x y)
return true
}
}
-func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAdd32(v *Value) bool {
// match: (Add32 x y)
// cond:
// result: (ADDL x y)
return true
}
}
-func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAdd32F(v *Value) bool {
// match: (Add32F x y)
// cond:
// result: (ADDSS x y)
return true
}
}
-func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAdd64(v *Value) bool {
// match: (Add64 x y)
// cond:
// result: (ADDQ x y)
return true
}
}
-func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAdd64F(v *Value) bool {
// match: (Add64F x y)
// cond:
// result: (ADDSD x y)
return true
}
}
-func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAdd8(v *Value) bool {
// match: (Add8 x y)
// cond:
// result: (ADDL x y)
return true
}
}
-func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAddPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (AddPtr x y)
// cond: config.PtrSize == 8
// result: (ADDQ x y)
}
return false
}
-func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAddr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (Addr {sym} base)
// cond: config.PtrSize == 8
// result: (LEAQ {sym} base)
}
return false
}
-func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAnd16(v *Value) bool {
// match: (And16 x y)
// cond:
// result: (ANDL x y)
return true
}
}
-func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAnd32(v *Value) bool {
// match: (And32 x y)
// cond:
// result: (ANDL x y)
return true
}
}
-func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAnd64(v *Value) bool {
// match: (And64 x y)
// cond:
// result: (ANDQ x y)
return true
}
}
-func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAnd8(v *Value) bool {
// match: (And8 x y)
// cond:
// result: (ANDL x y)
return true
}
}
-func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAndB(v *Value) bool {
// match: (AndB x y)
// cond:
// result: (ANDL x y)
return true
}
}
-func rewriteValueAMD64_OpAtomicAdd32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (AtomicAdd32 ptr val mem)
// cond:
// result: (AddTupleFirst32 (XADDLlock val ptr mem) val)
val := v.Args[1]
mem := v.Args[2]
v.reset(OpAMD64AddTupleFirst32)
- v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem))
+ v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, MakeTuple(fe.TypeUInt32(), TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
return true
}
}
-func rewriteValueAMD64_OpAtomicAdd64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (AtomicAdd64 ptr val mem)
// cond:
// result: (AddTupleFirst64 (XADDQlock val ptr mem) val)
val := v.Args[1]
mem := v.Args[2]
v.reset(OpAMD64AddTupleFirst64)
- v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem))
+ v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, MakeTuple(fe.TypeUInt64(), TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
return true
}
}
-func rewriteValueAMD64_OpAtomicAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
// match: (AtomicAnd8 ptr val mem)
// cond:
// result: (ANDBlock ptr val mem)
return true
}
}
-func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
// match: (AtomicCompareAndSwap32 ptr old new_ mem)
// cond:
// result: (CMPXCHGLlock ptr old new_ mem)
return true
}
}
-func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
// match: (AtomicCompareAndSwap64 ptr old new_ mem)
// cond:
// result: (CMPXCHGQlock ptr old new_ mem)
return true
}
}
-func rewriteValueAMD64_OpAtomicExchange32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
// match: (AtomicExchange32 ptr val mem)
// cond:
// result: (XCHGL val ptr mem)
return true
}
}
-func rewriteValueAMD64_OpAtomicExchange64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
// match: (AtomicExchange64 ptr val mem)
// cond:
// result: (XCHGQ val ptr mem)
return true
}
}
-func rewriteValueAMD64_OpAtomicLoad32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
// match: (AtomicLoad32 ptr mem)
// cond:
// result: (MOVLatomicload ptr mem)
return true
}
}
-func rewriteValueAMD64_OpAtomicLoad64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
// match: (AtomicLoad64 ptr mem)
// cond:
// result: (MOVQatomicload ptr mem)
return true
}
}
-func rewriteValueAMD64_OpAtomicLoadPtr(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (AtomicLoadPtr ptr mem)
// cond: config.PtrSize == 8
// result: (MOVQatomicload ptr mem)
}
return false
}
-func rewriteValueAMD64_OpAtomicOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
// match: (AtomicOr8 ptr val mem)
// cond:
// result: (ORBlock ptr val mem)
return true
}
}
-func rewriteValueAMD64_OpAtomicStore32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (AtomicStore32 ptr val mem)
// cond:
- // result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeUInt32(),TypeMem)> val ptr mem))
+ // result: (Select1 (XCHGL <MakeTuple(fe.TypeUInt32(),TypeMem)> val ptr mem))
for {
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeUInt32(), TypeMem))
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(fe.TypeUInt32(), TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
return true
}
}
-func rewriteValueAMD64_OpAtomicStore64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (AtomicStore64 ptr val mem)
// cond:
- // result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeUInt64(),TypeMem)> val ptr mem))
+ // result: (Select1 (XCHGQ <MakeTuple(fe.TypeUInt64(),TypeMem)> val ptr mem))
for {
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeUInt64(), TypeMem))
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(fe.TypeUInt64(), TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
return true
}
}
-func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (AtomicStorePtrNoWB ptr val mem)
// cond: config.PtrSize == 8
- // result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
+ // result: (Select1 (XCHGQ <MakeTuple(fe.TypeBytePtr(),TypeMem)> val ptr mem))
for {
ptr := v.Args[0]
val := v.Args[1]
break
}
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem))
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(fe.TypeBytePtr(), TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
}
// match: (AtomicStorePtrNoWB ptr val mem)
// cond: config.PtrSize == 4
- // result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
+ // result: (Select1 (XCHGL <MakeTuple(fe.TypeBytePtr(),TypeMem)> val ptr mem))
for {
ptr := v.Args[0]
val := v.Args[1]
break
}
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem))
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(fe.TypeBytePtr(), TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
}
return false
}
-func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpAvg64u(v *Value) bool {
// match: (Avg64u x y)
// cond:
// result: (AVGQU x y)
return true
}
}
-func rewriteValueAMD64_OpBitLen32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpBitLen32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (BitLen32 x)
// cond:
- // result: (BitLen64 (MOVLQZX <config.Frontend().TypeUInt64()> x))
+ // result: (BitLen64 (MOVLQZX <fe.TypeUInt64()> x))
for {
x := v.Args[0]
v.reset(OpBitLen64)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, config.Frontend().TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpBitLen64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpBitLen64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (BitLen64 <t> x)
// cond:
// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <TypeFlags> (BSRQ x))))
v.AuxInt = 1
v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
v1 := b.NewValue0(v.Pos, OpSelect0, t)
- v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(fe.TypeUInt64(), TypeFlags))
v2.AddArg(x)
v1.AddArg(v2)
v0.AddArg(v1)
v3.AuxInt = -1
v0.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpSelect1, TypeFlags)
- v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(fe.TypeUInt64(), TypeFlags))
v5.AddArg(x)
v4.AddArg(v5)
v0.AddArg(v4)
return true
}
}
-func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpBswap32(v *Value) bool {
// match: (Bswap32 x)
// cond:
// result: (BSWAPL x)
return true
}
}
-func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpBswap64(v *Value) bool {
// match: (Bswap64 x)
// cond:
// result: (BSWAPQ x)
return true
}
}
-func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpClosureCall(v *Value) bool {
// match: (ClosureCall [argwid] entry closure mem)
// cond:
// result: (CALLclosure [argwid] entry closure mem)
return true
}
}
-func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCom16(v *Value) bool {
// match: (Com16 x)
// cond:
// result: (NOTL x)
return true
}
}
-func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCom32(v *Value) bool {
// match: (Com32 x)
// cond:
// result: (NOTL x)
return true
}
}
-func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCom64(v *Value) bool {
// match: (Com64 x)
// cond:
// result: (NOTQ x)
return true
}
}
-func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCom8(v *Value) bool {
// match: (Com8 x)
// cond:
// result: (NOTL x)
return true
}
}
-func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpConst16(v *Value) bool {
// match: (Const16 [val])
// cond:
// result: (MOVLconst [val])
return true
}
}
-func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpConst32(v *Value) bool {
// match: (Const32 [val])
// cond:
// result: (MOVLconst [val])
return true
}
}
-func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpConst32F(v *Value) bool {
// match: (Const32F [val])
// cond:
// result: (MOVSSconst [val])
return true
}
}
-func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpConst64(v *Value) bool {
// match: (Const64 [val])
// cond:
// result: (MOVQconst [val])
return true
}
}
-func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpConst64F(v *Value) bool {
// match: (Const64F [val])
// cond:
// result: (MOVSDconst [val])
return true
}
}
-func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpConst8(v *Value) bool {
// match: (Const8 [val])
// cond:
// result: (MOVLconst [val])
return true
}
}
-func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpConstBool(v *Value) bool {
// match: (ConstBool [b])
// cond:
// result: (MOVLconst [b])
return true
}
}
-func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpConstNil(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (ConstNil)
// cond: config.PtrSize == 8
// result: (MOVQconst [0])
}
return false
}
-func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpConvert(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (Convert <t> x mem)
// cond: config.PtrSize == 8
// result: (MOVQconvert <t> x mem)
}
return false
}
-func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCtz32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Ctz32 x)
// cond:
- // result: (Select0 (BSFQ (ORQ <config.Frontend().TypeUInt64()> (MOVQconst [1<<32]) x)))
+ // result: (Select0 (BSFQ (ORQ <fe.TypeUInt64()> (MOVQconst [1<<32]) x)))
for {
x := v.Args[0]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags))
- v1 := b.NewValue0(v.Pos, OpAMD64ORQ, config.Frontend().TypeUInt64())
- v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(fe.TypeUInt64(), TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpAMD64ORQ, fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v2.AuxInt = 1 << 32
v1.AddArg(v2)
v1.AddArg(x)
return true
}
}
-func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCtz64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Ctz64 <t> x)
// cond:
// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))
x := v.Args[0]
v.reset(OpAMD64CMOVQEQ)
v0 := b.NewValue0(v.Pos, OpSelect0, t)
- v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(fe.TypeUInt64(), TypeFlags))
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
v2.AuxInt = 64
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags)
- v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags))
+ v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(fe.TypeUInt64(), TypeFlags))
v4.AddArg(x)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCvt32Fto32(v *Value) bool {
// match: (Cvt32Fto32 x)
// cond:
// result: (CVTTSS2SL x)
return true
}
}
-func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCvt32Fto64(v *Value) bool {
// match: (Cvt32Fto64 x)
// cond:
// result: (CVTTSS2SQ x)
return true
}
}
-func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCvt32Fto64F(v *Value) bool {
// match: (Cvt32Fto64F x)
// cond:
// result: (CVTSS2SD x)
return true
}
}
-func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCvt32to32F(v *Value) bool {
// match: (Cvt32to32F x)
// cond:
// result: (CVTSL2SS x)
return true
}
}
-func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCvt32to64F(v *Value) bool {
// match: (Cvt32to64F x)
// cond:
// result: (CVTSL2SD x)
return true
}
}
-func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCvt64Fto32(v *Value) bool {
// match: (Cvt64Fto32 x)
// cond:
// result: (CVTTSD2SL x)
return true
}
}
-func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCvt64Fto32F(v *Value) bool {
// match: (Cvt64Fto32F x)
// cond:
// result: (CVTSD2SS x)
return true
}
}
-func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCvt64Fto64(v *Value) bool {
// match: (Cvt64Fto64 x)
// cond:
// result: (CVTTSD2SQ x)
return true
}
}
-func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCvt64to32F(v *Value) bool {
// match: (Cvt64to32F x)
// cond:
// result: (CVTSQ2SS x)
return true
}
}
-func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpCvt64to64F(v *Value) bool {
// match: (Cvt64to64F x)
// cond:
// result: (CVTSQ2SD x)
return true
}
}
-func rewriteValueAMD64_OpDiv128u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpDiv128u(v *Value) bool {
// match: (Div128u xhi xlo y)
// cond:
// result: (DIVQU2 xhi xlo y)
return true
}
}
-func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16 x y)
// cond:
// result: (Select0 (DIVW x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(fe.TypeInt16(), fe.TypeInt16()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16u x y)
// cond:
// result: (Select0 (DIVWU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(fe.TypeUInt16(), fe.TypeUInt16()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div32 x y)
// cond:
// result: (Select0 (DIVL x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(fe.TypeInt32(), fe.TypeInt32()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpDiv32F(v *Value) bool {
// match: (Div32F x y)
// cond:
// result: (DIVSS x y)
return true
}
}
-func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div32u x y)
// cond:
// result: (Select0 (DIVLU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div64 x y)
// cond:
// result: (Select0 (DIVQ x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpDiv64F(v *Value) bool {
// match: (Div64F x y)
// cond:
// result: (DIVSD x y)
return true
}
}
-func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv64u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div64u x y)
// cond:
// result: (Select0 (DIVQU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8 x y)
// cond:
// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
- v1 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16())
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(fe.TypeInt16(), fe.TypeInt16()))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8u x y)
// cond:
// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(fe.TypeUInt16(), fe.TypeUInt16()))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpEq16(v *Value) bool {
b := v.Block
_ = b
// match: (Eq16 x y)
return true
}
}
-func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpEq32(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32 x y)
return true
}
}
-func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpEq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32F x y)
return true
}
}
-func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpEq64(v *Value) bool {
b := v.Block
_ = b
// match: (Eq64 x y)
return true
}
}
-func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpEq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq64F x y)
return true
}
}
-func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpEq8(v *Value) bool {
b := v.Block
_ = b
// match: (Eq8 x y)
return true
}
}
-func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpEqB(v *Value) bool {
b := v.Block
_ = b
// match: (EqB x y)
return true
}
}
-func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpEqPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (EqPtr x y)
// cond: config.PtrSize == 8
// result: (SETEQ (CMPQ x y))
}
return false
}
-func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq16(v *Value) bool {
b := v.Block
_ = b
// match: (Geq16 x y)
return true
}
}
-func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq16U(v *Value) bool {
b := v.Block
_ = b
// match: (Geq16U x y)
return true
}
}
-func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32 x y)
return true
}
}
-func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32F x y)
return true
}
}
-func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq32U(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32U x y)
return true
}
}
-func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq64(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64 x y)
return true
}
}
-func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64F x y)
return true
}
}
-func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq64U(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64U x y)
return true
}
}
-func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq8(v *Value) bool {
b := v.Block
_ = b
// match: (Geq8 x y)
return true
}
}
-func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq8U(v *Value) bool {
b := v.Block
_ = b
// match: (Geq8U x y)
return true
}
}
-func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpGetClosurePtr(v *Value) bool {
// match: (GetClosurePtr)
// cond:
// result: (LoweredGetClosurePtr)
return true
}
}
-func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpGetG(v *Value) bool {
// match: (GetG mem)
// cond:
// result: (LoweredGetG mem)
return true
}
}
-func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater16(v *Value) bool {
b := v.Block
_ = b
// match: (Greater16 x y)
return true
}
}
-func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater16U(v *Value) bool {
b := v.Block
_ = b
// match: (Greater16U x y)
return true
}
}
-func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater32(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32 x y)
return true
}
}
-func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater32F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32F x y)
return true
}
}
-func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater32U(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32U x y)
return true
}
}
-func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater64(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64 x y)
return true
}
}
-func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater64F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64F x y)
return true
}
}
-func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater64U(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64U x y)
return true
}
}
-func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater8(v *Value) bool {
b := v.Block
_ = b
// match: (Greater8 x y)
return true
}
}
-func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater8U(v *Value) bool {
b := v.Block
_ = b
// match: (Greater8U x y)
return true
}
}
-func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpHmul32(v *Value) bool {
// match: (Hmul32 x y)
// cond:
// result: (HMULL x y)
return true
}
}
-func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpHmul32u(v *Value) bool {
// match: (Hmul32u x y)
// cond:
// result: (HMULLU x y)
return true
}
}
-func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpHmul64(v *Value) bool {
// match: (Hmul64 x y)
// cond:
// result: (HMULQ x y)
return true
}
}
-func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpHmul64u(v *Value) bool {
// match: (Hmul64u x y)
// cond:
// result: (HMULQU x y)
return true
}
}
-func rewriteValueAMD64_OpInt64Hi(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpInt64Hi(v *Value) bool {
// match: (Int64Hi x)
// cond:
// result: (SHRQconst [32] x)
return true
}
}
-func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpInterCall(v *Value) bool {
// match: (InterCall [argwid] entry mem)
// cond:
// result: (CALLinter [argwid] entry mem)
return true
}
}
-func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
b := v.Block
_ = b
// match: (IsInBounds idx len)
return true
}
}
-func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (IsNonNil p)
// cond: config.PtrSize == 8
// result: (SETNE (TESTQ p p))
}
return false
}
-func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
b := v.Block
_ = b
// match: (IsSliceInBounds idx len)
return true
}
}
-func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq16(v *Value) bool {
b := v.Block
_ = b
// match: (Leq16 x y)
return true
}
}
-func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq16U(v *Value) bool {
b := v.Block
_ = b
// match: (Leq16U x y)
return true
}
}
-func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32 x y)
return true
}
}
-func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32F x y)
return true
}
}
-func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq32U(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32U x y)
return true
}
}
-func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq64(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64 x y)
return true
}
}
-func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64F x y)
return true
}
}
-func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq64U(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64U x y)
return true
}
}
-func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq8(v *Value) bool {
b := v.Block
_ = b
// match: (Leq8 x y)
return true
}
}
-func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq8U(v *Value) bool {
b := v.Block
_ = b
// match: (Leq8U x y)
return true
}
}
-func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess16(v *Value) bool {
b := v.Block
_ = b
// match: (Less16 x y)
return true
}
}
-func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess16U(v *Value) bool {
b := v.Block
_ = b
// match: (Less16U x y)
return true
}
}
-func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess32(v *Value) bool {
b := v.Block
_ = b
// match: (Less32 x y)
return true
}
}
-func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess32F(v *Value) bool {
b := v.Block
_ = b
// match: (Less32F x y)
return true
}
}
-func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess32U(v *Value) bool {
b := v.Block
_ = b
// match: (Less32U x y)
return true
}
}
-func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess64(v *Value) bool {
b := v.Block
_ = b
// match: (Less64 x y)
return true
}
}
-func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess64F(v *Value) bool {
b := v.Block
_ = b
// match: (Less64F x y)
return true
}
}
-func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess64U(v *Value) bool {
b := v.Block
_ = b
// match: (Less64U x y)
return true
}
}
-func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess8(v *Value) bool {
b := v.Block
_ = b
// match: (Less8 x y)
return true
}
}
-func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess8U(v *Value) bool {
b := v.Block
_ = b
// match: (Less8U x y)
return true
}
}
-func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLoad(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (Load <t> ptr mem)
// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
// result: (MOVQload ptr mem)
}
return false
}
-func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x16 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x32 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x64 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x8 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x16 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x32 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x64 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x8 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x16 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x32 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x64 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x8 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x16 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x32 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x64 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x8 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpMod16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16 x y)
// cond:
// result: (Select1 (DIVW x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(fe.TypeInt16(), fe.TypeInt16()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpMod16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16u x y)
// cond:
// result: (Select1 (DIVWU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(fe.TypeUInt16(), fe.TypeUInt16()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpMod32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod32 x y)
// cond:
// result: (Select1 (DIVL x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(fe.TypeInt32(), fe.TypeInt32()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpMod32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod32u x y)
// cond:
// result: (Select1 (DIVLU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpMod64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod64 x y)
// cond:
// result: (Select1 (DIVQ x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpMod64u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod64u x y)
// cond:
// result: (Select1 (DIVQU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpMod8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8 x y)
// cond:
// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
- v1 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16())
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(fe.TypeInt16(), fe.TypeInt16()))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to16, config.fe.TypeInt16())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpMod8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8u x y)
// cond:
// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(fe.TypeUInt16(), fe.TypeUInt16()))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to16, config.fe.TypeUInt16())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpMove(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Move [0] _ _ mem)
// cond:
// result: mem
mem := v.Args[2]
v.reset(OpAMD64MOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, fe.TypeUInt8())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
mem := v.Args[2]
v.reset(OpAMD64MOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, fe.TypeUInt16())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
mem := v.Args[2]
v.reset(OpAMD64MOVLstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
mem := v.Args[2]
v.reset(OpAMD64MOVQstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, fe.TypeUInt64())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpAMD64MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, fe.TypeUInt8())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, fe.TypeUInt16())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpAMD64MOVBstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, fe.TypeUInt8())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpAMD64MOVWstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, fe.TypeUInt16())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpAMD64MOVLstore)
v.AuxInt = 3
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpAMD64MOVQstore)
v.AuxInt = s - 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, fe.TypeUInt64())
v0.AuxInt = s - 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, fe.TypeUInt64())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
v2.AddArg(dst)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, fe.TypeUInt64())
v3.AddArg(src)
v3.AddArg(mem)
v2.AddArg(v3)
v.reset(OpAMD64REPMOVSQ)
v.AddArg(dst)
v.AddArg(src)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v0.AuxInt = s / 8
v.AddArg(v0)
v.AddArg(mem)
}
return false
}
-func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpMul16(v *Value) bool {
// match: (Mul16 x y)
// cond:
// result: (MULL x y)
return true
}
}
-func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpMul32(v *Value) bool {
// match: (Mul32 x y)
// cond:
// result: (MULL x y)
return true
}
}
-func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpMul32F(v *Value) bool {
// match: (Mul32F x y)
// cond:
// result: (MULSS x y)
return true
}
}
-func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpMul64(v *Value) bool {
// match: (Mul64 x y)
// cond:
// result: (MULQ x y)
return true
}
}
-func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpMul64F(v *Value) bool {
// match: (Mul64F x y)
// cond:
// result: (MULSD x y)
return true
}
}
-func rewriteValueAMD64_OpMul64uhilo(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpMul64uhilo(v *Value) bool {
// match: (Mul64uhilo x y)
// cond:
// result: (MULQU2 x y)
return true
}
}
-func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpMul8(v *Value) bool {
// match: (Mul8 x y)
// cond:
// result: (MULL x y)
return true
}
}
-func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpNeg16(v *Value) bool {
// match: (Neg16 x)
// cond:
// result: (NEGL x)
return true
}
}
-func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpNeg32(v *Value) bool {
// match: (Neg32 x)
// cond:
// result: (NEGL x)
return true
}
}
-func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeg32F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neg32F x)
// cond:
- // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
+ // result: (PXOR x (MOVSSconst <fe.TypeFloat32()> [f2i(math.Copysign(0, -1))]))
for {
x := v.Args[0]
v.reset(OpAMD64PXOR)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, fe.TypeFloat32())
v0.AuxInt = f2i(math.Copysign(0, -1))
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpNeg64(v *Value) bool {
// match: (Neg64 x)
// cond:
// result: (NEGQ x)
return true
}
}
-func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeg64F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neg64F x)
// cond:
- // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+ // result: (PXOR x (MOVSDconst <fe.TypeFloat64()> [f2i(math.Copysign(0, -1))]))
for {
x := v.Args[0]
v.reset(OpAMD64PXOR)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, fe.TypeFloat64())
v0.AuxInt = f2i(math.Copysign(0, -1))
v.AddArg(v0)
return true
}
}
-func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpNeg8(v *Value) bool {
// match: (Neg8 x)
// cond:
// result: (NEGL x)
return true
}
}
-func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeq16(v *Value) bool {
b := v.Block
_ = b
// match: (Neq16 x y)
return true
}
}
-func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32 x y)
return true
}
}
-func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32F x y)
return true
}
}
-func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeq64(v *Value) bool {
b := v.Block
_ = b
// match: (Neq64 x y)
return true
}
}
-func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq64F x y)
return true
}
}
-func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeq8(v *Value) bool {
b := v.Block
_ = b
// match: (Neq8 x y)
return true
}
}
-func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeqB(v *Value) bool {
b := v.Block
_ = b
// match: (NeqB x y)
return true
}
}
-func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (NeqPtr x y)
// cond: config.PtrSize == 8
// result: (SETNE (CMPQ x y))
}
return false
}
-func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpNilCheck(v *Value) bool {
// match: (NilCheck ptr mem)
// cond:
// result: (LoweredNilCheck ptr mem)
return true
}
}
-func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpNot(v *Value) bool {
// match: (Not x)
// cond:
// result: (XORLconst [1] x)
return true
}
}
-func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpOffPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (OffPtr [off] ptr)
// cond: config.PtrSize == 8 && is32Bit(off)
// result: (ADDQconst [off] ptr)
break
}
v.reset(OpAMD64ADDQ)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v0.AuxInt = off
v.AddArg(v0)
v.AddArg(ptr)
}
return false
}
-func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpOr16(v *Value) bool {
// match: (Or16 x y)
// cond:
// result: (ORL x y)
return true
}
}
-func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpOr32(v *Value) bool {
// match: (Or32 x y)
// cond:
// result: (ORL x y)
return true
}
}
-func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpOr64(v *Value) bool {
// match: (Or64 x y)
// cond:
// result: (ORQ x y)
return true
}
}
-func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpOr8(v *Value) bool {
// match: (Or8 x y)
// cond:
// result: (ORL x y)
return true
}
}
-func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpOrB(v *Value) bool {
// match: (OrB x y)
// cond:
// result: (ORL x y)
return true
}
}
-func rewriteValueAMD64_OpRound32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpRound32F(v *Value) bool {
// match: (Round32F x)
// cond:
// result: x
return true
}
}
-func rewriteValueAMD64_OpRound64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpRound64F(v *Value) bool {
// match: (Round64F x)
// cond:
// result: x
return true
}
}
-func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux16 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux32 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux64 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux8 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x16 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x32 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x64 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x8 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux16 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux32 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux64 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux8 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x16 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x32 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x64 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x8 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux16 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux32 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux64 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux8 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x16 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x32 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x64 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x8 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux16 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux32 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux64 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux8 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x16 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x32 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x64 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x8 <t> x y)
return true
}
}
-func rewriteValueAMD64_OpSelect0(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpSelect0(v *Value) bool {
b := v.Block
_ = b
// match: (Select0 <t> (AddTupleFirst32 tuple val))
}
return false
}
-func rewriteValueAMD64_OpSelect1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSelect1(v *Value) bool {
// match: (Select1 (AddTupleFirst32 tuple _ ))
// cond:
// result: (Select1 tuple)
}
return false
}
-func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSignExt16to32(v *Value) bool {
// match: (SignExt16to32 x)
// cond:
// result: (MOVWQSX x)
return true
}
}
-func rewriteValueAMD64_OpSignExt16to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSignExt16to64(v *Value) bool {
// match: (SignExt16to64 x)
// cond:
// result: (MOVWQSX x)
return true
}
}
-func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSignExt32to64(v *Value) bool {
// match: (SignExt32to64 x)
// cond:
// result: (MOVLQSX x)
return true
}
}
-func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSignExt8to16(v *Value) bool {
// match: (SignExt8to16 x)
// cond:
// result: (MOVBQSX x)
return true
}
}
-func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSignExt8to32(v *Value) bool {
// match: (SignExt8to32 x)
// cond:
// result: (MOVBQSX x)
return true
}
}
-func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSignExt8to64(v *Value) bool {
// match: (SignExt8to64 x)
// cond:
// result: (MOVBQSX x)
return true
}
}
-func rewriteValueAMD64_OpSlicemask(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpSlicemask(v *Value) bool {
b := v.Block
_ = b
// match: (Slicemask <t> x)
return true
}
}
-func rewriteValueAMD64_OpSqrt(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSqrt(v *Value) bool {
// match: (Sqrt x)
// cond:
// result: (SQRTSD x)
return true
}
}
-func rewriteValueAMD64_OpStaticCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpStaticCall(v *Value) bool {
// match: (StaticCall [argwid] {target} mem)
// cond:
// result: (CALLstatic [argwid] {target} mem)
return true
}
}
-func rewriteValueAMD64_OpStore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpStore(v *Value) bool {
// match: (Store {t} ptr val mem)
// cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
// result: (MOVSDstore ptr val mem)
}
return false
}
-func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSub16(v *Value) bool {
// match: (Sub16 x y)
// cond:
// result: (SUBL x y)
return true
}
}
-func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSub32(v *Value) bool {
// match: (Sub32 x y)
// cond:
// result: (SUBL x y)
return true
}
}
-func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSub32F(v *Value) bool {
// match: (Sub32F x y)
// cond:
// result: (SUBSS x y)
return true
}
}
-func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSub64(v *Value) bool {
// match: (Sub64 x y)
// cond:
// result: (SUBQ x y)
return true
}
}
-func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSub64F(v *Value) bool {
// match: (Sub64F x y)
// cond:
// result: (SUBSD x y)
return true
}
}
-func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpSub8(v *Value) bool {
// match: (Sub8 x y)
// cond:
// result: (SUBL x y)
return true
}
}
-func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpSubPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (SubPtr x y)
// cond: config.PtrSize == 8
// result: (SUBQ x y)
}
return false
}
-func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpTrunc16to8(v *Value) bool {
// match: (Trunc16to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpTrunc32to16(v *Value) bool {
// match: (Trunc32to16 x)
// cond:
// result: x
return true
}
}
-func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpTrunc32to8(v *Value) bool {
// match: (Trunc32to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueAMD64_OpTrunc64to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpTrunc64to16(v *Value) bool {
// match: (Trunc64to16 x)
// cond:
// result: x
return true
}
}
-func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpTrunc64to32(v *Value) bool {
// match: (Trunc64to32 x)
// cond:
// result: x
return true
}
}
-func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpTrunc64to8(v *Value) bool {
// match: (Trunc64to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpXor16(v *Value) bool {
// match: (Xor16 x y)
// cond:
// result: (XORL x y)
return true
}
}
-func rewriteValueAMD64_OpXor32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpXor32(v *Value) bool {
// match: (Xor32 x y)
// cond:
// result: (XORL x y)
return true
}
}
-func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpXor64(v *Value) bool {
// match: (Xor64 x y)
// cond:
// result: (XORQ x y)
return true
}
}
-func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpXor8(v *Value) bool {
// match: (Xor8 x y)
// cond:
// result: (XORL x y)
return true
}
}
-func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpZero(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Zero [0] _ mem)
// cond:
// result: mem
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
v1.AddArg(destptr)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
}
v.reset(OpAMD64REPSTOSQ)
v.AddArg(destptr)
- v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v0.AuxInt = s / 8
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(mem)
}
return false
}
-func rewriteValueAMD64_OpZeroExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpZeroExt16to32(v *Value) bool {
// match: (ZeroExt16to32 x)
// cond:
// result: (MOVWQZX x)
return true
}
}
-func rewriteValueAMD64_OpZeroExt16to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpZeroExt16to64(v *Value) bool {
// match: (ZeroExt16to64 x)
// cond:
// result: (MOVWQZX x)
return true
}
}
-func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpZeroExt32to64(v *Value) bool {
// match: (ZeroExt32to64 x)
// cond:
// result: (MOVLQZX x)
return true
}
}
-func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpZeroExt8to16(v *Value) bool {
// match: (ZeroExt8to16 x)
// cond:
// result: (MOVBQZX x)
return true
}
}
-func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpZeroExt8to32(v *Value) bool {
// match: (ZeroExt8to32 x)
// cond:
// result: (MOVBQZX x)
return true
}
}
-func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueAMD64_OpZeroExt8to64(v *Value) bool {
// match: (ZeroExt8to64 x)
// cond:
// result: (MOVBQZX x)
return true
}
}
-func rewriteBlockAMD64(b *Block, config *Config) bool {
+func rewriteBlockAMD64(b *Block) bool {
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
switch b.Kind {
case BlockAMD64EQ:
// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
import "math"
var _ = math.MinInt8 // in case not otherwise used
-func rewriteValueARM(v *Value, config *Config) bool {
+func rewriteValueARM(v *Value) bool {
switch v.Op {
case OpARMADC:
- return rewriteValueARM_OpARMADC(v, config)
+ return rewriteValueARM_OpARMADC(v)
case OpARMADCconst:
- return rewriteValueARM_OpARMADCconst(v, config)
+ return rewriteValueARM_OpARMADCconst(v)
case OpARMADCshiftLL:
- return rewriteValueARM_OpARMADCshiftLL(v, config)
+ return rewriteValueARM_OpARMADCshiftLL(v)
case OpARMADCshiftLLreg:
- return rewriteValueARM_OpARMADCshiftLLreg(v, config)
+ return rewriteValueARM_OpARMADCshiftLLreg(v)
case OpARMADCshiftRA:
- return rewriteValueARM_OpARMADCshiftRA(v, config)
+ return rewriteValueARM_OpARMADCshiftRA(v)
case OpARMADCshiftRAreg:
- return rewriteValueARM_OpARMADCshiftRAreg(v, config)
+ return rewriteValueARM_OpARMADCshiftRAreg(v)
case OpARMADCshiftRL:
- return rewriteValueARM_OpARMADCshiftRL(v, config)
+ return rewriteValueARM_OpARMADCshiftRL(v)
case OpARMADCshiftRLreg:
- return rewriteValueARM_OpARMADCshiftRLreg(v, config)
+ return rewriteValueARM_OpARMADCshiftRLreg(v)
case OpARMADD:
- return rewriteValueARM_OpARMADD(v, config)
+ return rewriteValueARM_OpARMADD(v)
case OpARMADDS:
- return rewriteValueARM_OpARMADDS(v, config)
+ return rewriteValueARM_OpARMADDS(v)
case OpARMADDSshiftLL:
- return rewriteValueARM_OpARMADDSshiftLL(v, config)
+ return rewriteValueARM_OpARMADDSshiftLL(v)
case OpARMADDSshiftLLreg:
- return rewriteValueARM_OpARMADDSshiftLLreg(v, config)
+ return rewriteValueARM_OpARMADDSshiftLLreg(v)
case OpARMADDSshiftRA:
- return rewriteValueARM_OpARMADDSshiftRA(v, config)
+ return rewriteValueARM_OpARMADDSshiftRA(v)
case OpARMADDSshiftRAreg:
- return rewriteValueARM_OpARMADDSshiftRAreg(v, config)
+ return rewriteValueARM_OpARMADDSshiftRAreg(v)
case OpARMADDSshiftRL:
- return rewriteValueARM_OpARMADDSshiftRL(v, config)
+ return rewriteValueARM_OpARMADDSshiftRL(v)
case OpARMADDSshiftRLreg:
- return rewriteValueARM_OpARMADDSshiftRLreg(v, config)
+ return rewriteValueARM_OpARMADDSshiftRLreg(v)
case OpARMADDconst:
- return rewriteValueARM_OpARMADDconst(v, config)
+ return rewriteValueARM_OpARMADDconst(v)
case OpARMADDshiftLL:
- return rewriteValueARM_OpARMADDshiftLL(v, config)
+ return rewriteValueARM_OpARMADDshiftLL(v)
case OpARMADDshiftLLreg:
- return rewriteValueARM_OpARMADDshiftLLreg(v, config)
+ return rewriteValueARM_OpARMADDshiftLLreg(v)
case OpARMADDshiftRA:
- return rewriteValueARM_OpARMADDshiftRA(v, config)
+ return rewriteValueARM_OpARMADDshiftRA(v)
case OpARMADDshiftRAreg:
- return rewriteValueARM_OpARMADDshiftRAreg(v, config)
+ return rewriteValueARM_OpARMADDshiftRAreg(v)
case OpARMADDshiftRL:
- return rewriteValueARM_OpARMADDshiftRL(v, config)
+ return rewriteValueARM_OpARMADDshiftRL(v)
case OpARMADDshiftRLreg:
- return rewriteValueARM_OpARMADDshiftRLreg(v, config)
+ return rewriteValueARM_OpARMADDshiftRLreg(v)
case OpARMAND:
- return rewriteValueARM_OpARMAND(v, config)
+ return rewriteValueARM_OpARMAND(v)
case OpARMANDconst:
- return rewriteValueARM_OpARMANDconst(v, config)
+ return rewriteValueARM_OpARMANDconst(v)
case OpARMANDshiftLL:
- return rewriteValueARM_OpARMANDshiftLL(v, config)
+ return rewriteValueARM_OpARMANDshiftLL(v)
case OpARMANDshiftLLreg:
- return rewriteValueARM_OpARMANDshiftLLreg(v, config)
+ return rewriteValueARM_OpARMANDshiftLLreg(v)
case OpARMANDshiftRA:
- return rewriteValueARM_OpARMANDshiftRA(v, config)
+ return rewriteValueARM_OpARMANDshiftRA(v)
case OpARMANDshiftRAreg:
- return rewriteValueARM_OpARMANDshiftRAreg(v, config)
+ return rewriteValueARM_OpARMANDshiftRAreg(v)
case OpARMANDshiftRL:
- return rewriteValueARM_OpARMANDshiftRL(v, config)
+ return rewriteValueARM_OpARMANDshiftRL(v)
case OpARMANDshiftRLreg:
- return rewriteValueARM_OpARMANDshiftRLreg(v, config)
+ return rewriteValueARM_OpARMANDshiftRLreg(v)
case OpARMBIC:
- return rewriteValueARM_OpARMBIC(v, config)
+ return rewriteValueARM_OpARMBIC(v)
case OpARMBICconst:
- return rewriteValueARM_OpARMBICconst(v, config)
+ return rewriteValueARM_OpARMBICconst(v)
case OpARMBICshiftLL:
- return rewriteValueARM_OpARMBICshiftLL(v, config)
+ return rewriteValueARM_OpARMBICshiftLL(v)
case OpARMBICshiftLLreg:
- return rewriteValueARM_OpARMBICshiftLLreg(v, config)
+ return rewriteValueARM_OpARMBICshiftLLreg(v)
case OpARMBICshiftRA:
- return rewriteValueARM_OpARMBICshiftRA(v, config)
+ return rewriteValueARM_OpARMBICshiftRA(v)
case OpARMBICshiftRAreg:
- return rewriteValueARM_OpARMBICshiftRAreg(v, config)
+ return rewriteValueARM_OpARMBICshiftRAreg(v)
case OpARMBICshiftRL:
- return rewriteValueARM_OpARMBICshiftRL(v, config)
+ return rewriteValueARM_OpARMBICshiftRL(v)
case OpARMBICshiftRLreg:
- return rewriteValueARM_OpARMBICshiftRLreg(v, config)
+ return rewriteValueARM_OpARMBICshiftRLreg(v)
case OpARMCMOVWHSconst:
- return rewriteValueARM_OpARMCMOVWHSconst(v, config)
+ return rewriteValueARM_OpARMCMOVWHSconst(v)
case OpARMCMOVWLSconst:
- return rewriteValueARM_OpARMCMOVWLSconst(v, config)
+ return rewriteValueARM_OpARMCMOVWLSconst(v)
case OpARMCMP:
- return rewriteValueARM_OpARMCMP(v, config)
+ return rewriteValueARM_OpARMCMP(v)
case OpARMCMPD:
- return rewriteValueARM_OpARMCMPD(v, config)
+ return rewriteValueARM_OpARMCMPD(v)
case OpARMCMPF:
- return rewriteValueARM_OpARMCMPF(v, config)
+ return rewriteValueARM_OpARMCMPF(v)
case OpARMCMPconst:
- return rewriteValueARM_OpARMCMPconst(v, config)
+ return rewriteValueARM_OpARMCMPconst(v)
case OpARMCMPshiftLL:
- return rewriteValueARM_OpARMCMPshiftLL(v, config)
+ return rewriteValueARM_OpARMCMPshiftLL(v)
case OpARMCMPshiftLLreg:
- return rewriteValueARM_OpARMCMPshiftLLreg(v, config)
+ return rewriteValueARM_OpARMCMPshiftLLreg(v)
case OpARMCMPshiftRA:
- return rewriteValueARM_OpARMCMPshiftRA(v, config)
+ return rewriteValueARM_OpARMCMPshiftRA(v)
case OpARMCMPshiftRAreg:
- return rewriteValueARM_OpARMCMPshiftRAreg(v, config)
+ return rewriteValueARM_OpARMCMPshiftRAreg(v)
case OpARMCMPshiftRL:
- return rewriteValueARM_OpARMCMPshiftRL(v, config)
+ return rewriteValueARM_OpARMCMPshiftRL(v)
case OpARMCMPshiftRLreg:
- return rewriteValueARM_OpARMCMPshiftRLreg(v, config)
+ return rewriteValueARM_OpARMCMPshiftRLreg(v)
case OpARMEqual:
- return rewriteValueARM_OpARMEqual(v, config)
+ return rewriteValueARM_OpARMEqual(v)
case OpARMGreaterEqual:
- return rewriteValueARM_OpARMGreaterEqual(v, config)
+ return rewriteValueARM_OpARMGreaterEqual(v)
case OpARMGreaterEqualU:
- return rewriteValueARM_OpARMGreaterEqualU(v, config)
+ return rewriteValueARM_OpARMGreaterEqualU(v)
case OpARMGreaterThan:
- return rewriteValueARM_OpARMGreaterThan(v, config)
+ return rewriteValueARM_OpARMGreaterThan(v)
case OpARMGreaterThanU:
- return rewriteValueARM_OpARMGreaterThanU(v, config)
+ return rewriteValueARM_OpARMGreaterThanU(v)
case OpARMLessEqual:
- return rewriteValueARM_OpARMLessEqual(v, config)
+ return rewriteValueARM_OpARMLessEqual(v)
case OpARMLessEqualU:
- return rewriteValueARM_OpARMLessEqualU(v, config)
+ return rewriteValueARM_OpARMLessEqualU(v)
case OpARMLessThan:
- return rewriteValueARM_OpARMLessThan(v, config)
+ return rewriteValueARM_OpARMLessThan(v)
case OpARMLessThanU:
- return rewriteValueARM_OpARMLessThanU(v, config)
+ return rewriteValueARM_OpARMLessThanU(v)
case OpARMMOVBUload:
- return rewriteValueARM_OpARMMOVBUload(v, config)
+ return rewriteValueARM_OpARMMOVBUload(v)
case OpARMMOVBUreg:
- return rewriteValueARM_OpARMMOVBUreg(v, config)
+ return rewriteValueARM_OpARMMOVBUreg(v)
case OpARMMOVBload:
- return rewriteValueARM_OpARMMOVBload(v, config)
+ return rewriteValueARM_OpARMMOVBload(v)
case OpARMMOVBreg:
- return rewriteValueARM_OpARMMOVBreg(v, config)
+ return rewriteValueARM_OpARMMOVBreg(v)
case OpARMMOVBstore:
- return rewriteValueARM_OpARMMOVBstore(v, config)
+ return rewriteValueARM_OpARMMOVBstore(v)
case OpARMMOVDload:
- return rewriteValueARM_OpARMMOVDload(v, config)
+ return rewriteValueARM_OpARMMOVDload(v)
case OpARMMOVDstore:
- return rewriteValueARM_OpARMMOVDstore(v, config)
+ return rewriteValueARM_OpARMMOVDstore(v)
case OpARMMOVFload:
- return rewriteValueARM_OpARMMOVFload(v, config)
+ return rewriteValueARM_OpARMMOVFload(v)
case OpARMMOVFstore:
- return rewriteValueARM_OpARMMOVFstore(v, config)
+ return rewriteValueARM_OpARMMOVFstore(v)
case OpARMMOVHUload:
- return rewriteValueARM_OpARMMOVHUload(v, config)
+ return rewriteValueARM_OpARMMOVHUload(v)
case OpARMMOVHUreg:
- return rewriteValueARM_OpARMMOVHUreg(v, config)
+ return rewriteValueARM_OpARMMOVHUreg(v)
case OpARMMOVHload:
- return rewriteValueARM_OpARMMOVHload(v, config)
+ return rewriteValueARM_OpARMMOVHload(v)
case OpARMMOVHreg:
- return rewriteValueARM_OpARMMOVHreg(v, config)
+ return rewriteValueARM_OpARMMOVHreg(v)
case OpARMMOVHstore:
- return rewriteValueARM_OpARMMOVHstore(v, config)
+ return rewriteValueARM_OpARMMOVHstore(v)
case OpARMMOVWload:
- return rewriteValueARM_OpARMMOVWload(v, config)
+ return rewriteValueARM_OpARMMOVWload(v)
case OpARMMOVWloadidx:
- return rewriteValueARM_OpARMMOVWloadidx(v, config)
+ return rewriteValueARM_OpARMMOVWloadidx(v)
case OpARMMOVWloadshiftLL:
- return rewriteValueARM_OpARMMOVWloadshiftLL(v, config)
+ return rewriteValueARM_OpARMMOVWloadshiftLL(v)
case OpARMMOVWloadshiftRA:
- return rewriteValueARM_OpARMMOVWloadshiftRA(v, config)
+ return rewriteValueARM_OpARMMOVWloadshiftRA(v)
case OpARMMOVWloadshiftRL:
- return rewriteValueARM_OpARMMOVWloadshiftRL(v, config)
+ return rewriteValueARM_OpARMMOVWloadshiftRL(v)
case OpARMMOVWreg:
- return rewriteValueARM_OpARMMOVWreg(v, config)
+ return rewriteValueARM_OpARMMOVWreg(v)
case OpARMMOVWstore:
- return rewriteValueARM_OpARMMOVWstore(v, config)
+ return rewriteValueARM_OpARMMOVWstore(v)
case OpARMMOVWstoreidx:
- return rewriteValueARM_OpARMMOVWstoreidx(v, config)
+ return rewriteValueARM_OpARMMOVWstoreidx(v)
case OpARMMOVWstoreshiftLL:
- return rewriteValueARM_OpARMMOVWstoreshiftLL(v, config)
+ return rewriteValueARM_OpARMMOVWstoreshiftLL(v)
case OpARMMOVWstoreshiftRA:
- return rewriteValueARM_OpARMMOVWstoreshiftRA(v, config)
+ return rewriteValueARM_OpARMMOVWstoreshiftRA(v)
case OpARMMOVWstoreshiftRL:
- return rewriteValueARM_OpARMMOVWstoreshiftRL(v, config)
+ return rewriteValueARM_OpARMMOVWstoreshiftRL(v)
case OpARMMUL:
- return rewriteValueARM_OpARMMUL(v, config)
+ return rewriteValueARM_OpARMMUL(v)
case OpARMMULA:
- return rewriteValueARM_OpARMMULA(v, config)
+ return rewriteValueARM_OpARMMULA(v)
case OpARMMVN:
- return rewriteValueARM_OpARMMVN(v, config)
+ return rewriteValueARM_OpARMMVN(v)
case OpARMMVNshiftLL:
- return rewriteValueARM_OpARMMVNshiftLL(v, config)
+ return rewriteValueARM_OpARMMVNshiftLL(v)
case OpARMMVNshiftLLreg:
- return rewriteValueARM_OpARMMVNshiftLLreg(v, config)
+ return rewriteValueARM_OpARMMVNshiftLLreg(v)
case OpARMMVNshiftRA:
- return rewriteValueARM_OpARMMVNshiftRA(v, config)
+ return rewriteValueARM_OpARMMVNshiftRA(v)
case OpARMMVNshiftRAreg:
- return rewriteValueARM_OpARMMVNshiftRAreg(v, config)
+ return rewriteValueARM_OpARMMVNshiftRAreg(v)
case OpARMMVNshiftRL:
- return rewriteValueARM_OpARMMVNshiftRL(v, config)
+ return rewriteValueARM_OpARMMVNshiftRL(v)
case OpARMMVNshiftRLreg:
- return rewriteValueARM_OpARMMVNshiftRLreg(v, config)
+ return rewriteValueARM_OpARMMVNshiftRLreg(v)
case OpARMNotEqual:
- return rewriteValueARM_OpARMNotEqual(v, config)
+ return rewriteValueARM_OpARMNotEqual(v)
case OpARMOR:
- return rewriteValueARM_OpARMOR(v, config)
+ return rewriteValueARM_OpARMOR(v)
case OpARMORconst:
- return rewriteValueARM_OpARMORconst(v, config)
+ return rewriteValueARM_OpARMORconst(v)
case OpARMORshiftLL:
- return rewriteValueARM_OpARMORshiftLL(v, config)
+ return rewriteValueARM_OpARMORshiftLL(v)
case OpARMORshiftLLreg:
- return rewriteValueARM_OpARMORshiftLLreg(v, config)
+ return rewriteValueARM_OpARMORshiftLLreg(v)
case OpARMORshiftRA:
- return rewriteValueARM_OpARMORshiftRA(v, config)
+ return rewriteValueARM_OpARMORshiftRA(v)
case OpARMORshiftRAreg:
- return rewriteValueARM_OpARMORshiftRAreg(v, config)
+ return rewriteValueARM_OpARMORshiftRAreg(v)
case OpARMORshiftRL:
- return rewriteValueARM_OpARMORshiftRL(v, config)
+ return rewriteValueARM_OpARMORshiftRL(v)
case OpARMORshiftRLreg:
- return rewriteValueARM_OpARMORshiftRLreg(v, config)
+ return rewriteValueARM_OpARMORshiftRLreg(v)
case OpARMRSB:
- return rewriteValueARM_OpARMRSB(v, config)
+ return rewriteValueARM_OpARMRSB(v)
case OpARMRSBSshiftLL:
- return rewriteValueARM_OpARMRSBSshiftLL(v, config)
+ return rewriteValueARM_OpARMRSBSshiftLL(v)
case OpARMRSBSshiftLLreg:
- return rewriteValueARM_OpARMRSBSshiftLLreg(v, config)
+ return rewriteValueARM_OpARMRSBSshiftLLreg(v)
case OpARMRSBSshiftRA:
- return rewriteValueARM_OpARMRSBSshiftRA(v, config)
+ return rewriteValueARM_OpARMRSBSshiftRA(v)
case OpARMRSBSshiftRAreg:
- return rewriteValueARM_OpARMRSBSshiftRAreg(v, config)
+ return rewriteValueARM_OpARMRSBSshiftRAreg(v)
case OpARMRSBSshiftRL:
- return rewriteValueARM_OpARMRSBSshiftRL(v, config)
+ return rewriteValueARM_OpARMRSBSshiftRL(v)
case OpARMRSBSshiftRLreg:
- return rewriteValueARM_OpARMRSBSshiftRLreg(v, config)
+ return rewriteValueARM_OpARMRSBSshiftRLreg(v)
case OpARMRSBconst:
- return rewriteValueARM_OpARMRSBconst(v, config)
+ return rewriteValueARM_OpARMRSBconst(v)
case OpARMRSBshiftLL:
- return rewriteValueARM_OpARMRSBshiftLL(v, config)
+ return rewriteValueARM_OpARMRSBshiftLL(v)
case OpARMRSBshiftLLreg:
- return rewriteValueARM_OpARMRSBshiftLLreg(v, config)
+ return rewriteValueARM_OpARMRSBshiftLLreg(v)
case OpARMRSBshiftRA:
- return rewriteValueARM_OpARMRSBshiftRA(v, config)
+ return rewriteValueARM_OpARMRSBshiftRA(v)
case OpARMRSBshiftRAreg:
- return rewriteValueARM_OpARMRSBshiftRAreg(v, config)
+ return rewriteValueARM_OpARMRSBshiftRAreg(v)
case OpARMRSBshiftRL:
- return rewriteValueARM_OpARMRSBshiftRL(v, config)
+ return rewriteValueARM_OpARMRSBshiftRL(v)
case OpARMRSBshiftRLreg:
- return rewriteValueARM_OpARMRSBshiftRLreg(v, config)
+ return rewriteValueARM_OpARMRSBshiftRLreg(v)
case OpARMRSCconst:
- return rewriteValueARM_OpARMRSCconst(v, config)
+ return rewriteValueARM_OpARMRSCconst(v)
case OpARMRSCshiftLL:
- return rewriteValueARM_OpARMRSCshiftLL(v, config)
+ return rewriteValueARM_OpARMRSCshiftLL(v)
case OpARMRSCshiftLLreg:
- return rewriteValueARM_OpARMRSCshiftLLreg(v, config)
+ return rewriteValueARM_OpARMRSCshiftLLreg(v)
case OpARMRSCshiftRA:
- return rewriteValueARM_OpARMRSCshiftRA(v, config)
+ return rewriteValueARM_OpARMRSCshiftRA(v)
case OpARMRSCshiftRAreg:
- return rewriteValueARM_OpARMRSCshiftRAreg(v, config)
+ return rewriteValueARM_OpARMRSCshiftRAreg(v)
case OpARMRSCshiftRL:
- return rewriteValueARM_OpARMRSCshiftRL(v, config)
+ return rewriteValueARM_OpARMRSCshiftRL(v)
case OpARMRSCshiftRLreg:
- return rewriteValueARM_OpARMRSCshiftRLreg(v, config)
+ return rewriteValueARM_OpARMRSCshiftRLreg(v)
case OpARMSBC:
- return rewriteValueARM_OpARMSBC(v, config)
+ return rewriteValueARM_OpARMSBC(v)
case OpARMSBCconst:
- return rewriteValueARM_OpARMSBCconst(v, config)
+ return rewriteValueARM_OpARMSBCconst(v)
case OpARMSBCshiftLL:
- return rewriteValueARM_OpARMSBCshiftLL(v, config)
+ return rewriteValueARM_OpARMSBCshiftLL(v)
case OpARMSBCshiftLLreg:
- return rewriteValueARM_OpARMSBCshiftLLreg(v, config)
+ return rewriteValueARM_OpARMSBCshiftLLreg(v)
case OpARMSBCshiftRA:
- return rewriteValueARM_OpARMSBCshiftRA(v, config)
+ return rewriteValueARM_OpARMSBCshiftRA(v)
case OpARMSBCshiftRAreg:
- return rewriteValueARM_OpARMSBCshiftRAreg(v, config)
+ return rewriteValueARM_OpARMSBCshiftRAreg(v)
case OpARMSBCshiftRL:
- return rewriteValueARM_OpARMSBCshiftRL(v, config)
+ return rewriteValueARM_OpARMSBCshiftRL(v)
case OpARMSBCshiftRLreg:
- return rewriteValueARM_OpARMSBCshiftRLreg(v, config)
+ return rewriteValueARM_OpARMSBCshiftRLreg(v)
case OpARMSLL:
- return rewriteValueARM_OpARMSLL(v, config)
+ return rewriteValueARM_OpARMSLL(v)
case OpARMSLLconst:
- return rewriteValueARM_OpARMSLLconst(v, config)
+ return rewriteValueARM_OpARMSLLconst(v)
case OpARMSRA:
- return rewriteValueARM_OpARMSRA(v, config)
+ return rewriteValueARM_OpARMSRA(v)
case OpARMSRAcond:
- return rewriteValueARM_OpARMSRAcond(v, config)
+ return rewriteValueARM_OpARMSRAcond(v)
case OpARMSRAconst:
- return rewriteValueARM_OpARMSRAconst(v, config)
+ return rewriteValueARM_OpARMSRAconst(v)
case OpARMSRL:
- return rewriteValueARM_OpARMSRL(v, config)
+ return rewriteValueARM_OpARMSRL(v)
case OpARMSRLconst:
- return rewriteValueARM_OpARMSRLconst(v, config)
+ return rewriteValueARM_OpARMSRLconst(v)
case OpARMSUB:
- return rewriteValueARM_OpARMSUB(v, config)
+ return rewriteValueARM_OpARMSUB(v)
case OpARMSUBS:
- return rewriteValueARM_OpARMSUBS(v, config)
+ return rewriteValueARM_OpARMSUBS(v)
case OpARMSUBSshiftLL:
- return rewriteValueARM_OpARMSUBSshiftLL(v, config)
+ return rewriteValueARM_OpARMSUBSshiftLL(v)
case OpARMSUBSshiftLLreg:
- return rewriteValueARM_OpARMSUBSshiftLLreg(v, config)
+ return rewriteValueARM_OpARMSUBSshiftLLreg(v)
case OpARMSUBSshiftRA:
- return rewriteValueARM_OpARMSUBSshiftRA(v, config)
+ return rewriteValueARM_OpARMSUBSshiftRA(v)
case OpARMSUBSshiftRAreg:
- return rewriteValueARM_OpARMSUBSshiftRAreg(v, config)
+ return rewriteValueARM_OpARMSUBSshiftRAreg(v)
case OpARMSUBSshiftRL:
- return rewriteValueARM_OpARMSUBSshiftRL(v, config)
+ return rewriteValueARM_OpARMSUBSshiftRL(v)
case OpARMSUBSshiftRLreg:
- return rewriteValueARM_OpARMSUBSshiftRLreg(v, config)
+ return rewriteValueARM_OpARMSUBSshiftRLreg(v)
case OpARMSUBconst:
- return rewriteValueARM_OpARMSUBconst(v, config)
+ return rewriteValueARM_OpARMSUBconst(v)
case OpARMSUBshiftLL:
- return rewriteValueARM_OpARMSUBshiftLL(v, config)
+ return rewriteValueARM_OpARMSUBshiftLL(v)
case OpARMSUBshiftLLreg:
- return rewriteValueARM_OpARMSUBshiftLLreg(v, config)
+ return rewriteValueARM_OpARMSUBshiftLLreg(v)
case OpARMSUBshiftRA:
- return rewriteValueARM_OpARMSUBshiftRA(v, config)
+ return rewriteValueARM_OpARMSUBshiftRA(v)
case OpARMSUBshiftRAreg:
- return rewriteValueARM_OpARMSUBshiftRAreg(v, config)
+ return rewriteValueARM_OpARMSUBshiftRAreg(v)
case OpARMSUBshiftRL:
- return rewriteValueARM_OpARMSUBshiftRL(v, config)
+ return rewriteValueARM_OpARMSUBshiftRL(v)
case OpARMSUBshiftRLreg:
- return rewriteValueARM_OpARMSUBshiftRLreg(v, config)
+ return rewriteValueARM_OpARMSUBshiftRLreg(v)
case OpARMXOR:
- return rewriteValueARM_OpARMXOR(v, config)
+ return rewriteValueARM_OpARMXOR(v)
case OpARMXORconst:
- return rewriteValueARM_OpARMXORconst(v, config)
+ return rewriteValueARM_OpARMXORconst(v)
case OpARMXORshiftLL:
- return rewriteValueARM_OpARMXORshiftLL(v, config)
+ return rewriteValueARM_OpARMXORshiftLL(v)
case OpARMXORshiftLLreg:
- return rewriteValueARM_OpARMXORshiftLLreg(v, config)
+ return rewriteValueARM_OpARMXORshiftLLreg(v)
case OpARMXORshiftRA:
- return rewriteValueARM_OpARMXORshiftRA(v, config)
+ return rewriteValueARM_OpARMXORshiftRA(v)
case OpARMXORshiftRAreg:
- return rewriteValueARM_OpARMXORshiftRAreg(v, config)
+ return rewriteValueARM_OpARMXORshiftRAreg(v)
case OpARMXORshiftRL:
- return rewriteValueARM_OpARMXORshiftRL(v, config)
+ return rewriteValueARM_OpARMXORshiftRL(v)
case OpARMXORshiftRLreg:
- return rewriteValueARM_OpARMXORshiftRLreg(v, config)
+ return rewriteValueARM_OpARMXORshiftRLreg(v)
case OpARMXORshiftRR:
- return rewriteValueARM_OpARMXORshiftRR(v, config)
+ return rewriteValueARM_OpARMXORshiftRR(v)
case OpAdd16:
- return rewriteValueARM_OpAdd16(v, config)
+ return rewriteValueARM_OpAdd16(v)
case OpAdd32:
- return rewriteValueARM_OpAdd32(v, config)
+ return rewriteValueARM_OpAdd32(v)
case OpAdd32F:
- return rewriteValueARM_OpAdd32F(v, config)
+ return rewriteValueARM_OpAdd32F(v)
case OpAdd32carry:
- return rewriteValueARM_OpAdd32carry(v, config)
+ return rewriteValueARM_OpAdd32carry(v)
case OpAdd32withcarry:
- return rewriteValueARM_OpAdd32withcarry(v, config)
+ return rewriteValueARM_OpAdd32withcarry(v)
case OpAdd64F:
- return rewriteValueARM_OpAdd64F(v, config)
+ return rewriteValueARM_OpAdd64F(v)
case OpAdd8:
- return rewriteValueARM_OpAdd8(v, config)
+ return rewriteValueARM_OpAdd8(v)
case OpAddPtr:
- return rewriteValueARM_OpAddPtr(v, config)
+ return rewriteValueARM_OpAddPtr(v)
case OpAddr:
- return rewriteValueARM_OpAddr(v, config)
+ return rewriteValueARM_OpAddr(v)
case OpAnd16:
- return rewriteValueARM_OpAnd16(v, config)
+ return rewriteValueARM_OpAnd16(v)
case OpAnd32:
- return rewriteValueARM_OpAnd32(v, config)
+ return rewriteValueARM_OpAnd32(v)
case OpAnd8:
- return rewriteValueARM_OpAnd8(v, config)
+ return rewriteValueARM_OpAnd8(v)
case OpAndB:
- return rewriteValueARM_OpAndB(v, config)
+ return rewriteValueARM_OpAndB(v)
case OpAvg32u:
- return rewriteValueARM_OpAvg32u(v, config)
+ return rewriteValueARM_OpAvg32u(v)
case OpBitLen32:
- return rewriteValueARM_OpBitLen32(v, config)
+ return rewriteValueARM_OpBitLen32(v)
case OpBswap32:
- return rewriteValueARM_OpBswap32(v, config)
+ return rewriteValueARM_OpBswap32(v)
case OpClosureCall:
- return rewriteValueARM_OpClosureCall(v, config)
+ return rewriteValueARM_OpClosureCall(v)
case OpCom16:
- return rewriteValueARM_OpCom16(v, config)
+ return rewriteValueARM_OpCom16(v)
case OpCom32:
- return rewriteValueARM_OpCom32(v, config)
+ return rewriteValueARM_OpCom32(v)
case OpCom8:
- return rewriteValueARM_OpCom8(v, config)
+ return rewriteValueARM_OpCom8(v)
case OpConst16:
- return rewriteValueARM_OpConst16(v, config)
+ return rewriteValueARM_OpConst16(v)
case OpConst32:
- return rewriteValueARM_OpConst32(v, config)
+ return rewriteValueARM_OpConst32(v)
case OpConst32F:
- return rewriteValueARM_OpConst32F(v, config)
+ return rewriteValueARM_OpConst32F(v)
case OpConst64F:
- return rewriteValueARM_OpConst64F(v, config)
+ return rewriteValueARM_OpConst64F(v)
case OpConst8:
- return rewriteValueARM_OpConst8(v, config)
+ return rewriteValueARM_OpConst8(v)
case OpConstBool:
- return rewriteValueARM_OpConstBool(v, config)
+ return rewriteValueARM_OpConstBool(v)
case OpConstNil:
- return rewriteValueARM_OpConstNil(v, config)
+ return rewriteValueARM_OpConstNil(v)
case OpConvert:
- return rewriteValueARM_OpConvert(v, config)
+ return rewriteValueARM_OpConvert(v)
case OpCtz32:
- return rewriteValueARM_OpCtz32(v, config)
+ return rewriteValueARM_OpCtz32(v)
case OpCvt32Fto32:
- return rewriteValueARM_OpCvt32Fto32(v, config)
+ return rewriteValueARM_OpCvt32Fto32(v)
case OpCvt32Fto32U:
- return rewriteValueARM_OpCvt32Fto32U(v, config)
+ return rewriteValueARM_OpCvt32Fto32U(v)
case OpCvt32Fto64F:
- return rewriteValueARM_OpCvt32Fto64F(v, config)
+ return rewriteValueARM_OpCvt32Fto64F(v)
case OpCvt32Uto32F:
- return rewriteValueARM_OpCvt32Uto32F(v, config)
+ return rewriteValueARM_OpCvt32Uto32F(v)
case OpCvt32Uto64F:
- return rewriteValueARM_OpCvt32Uto64F(v, config)
+ return rewriteValueARM_OpCvt32Uto64F(v)
case OpCvt32to32F:
- return rewriteValueARM_OpCvt32to32F(v, config)
+ return rewriteValueARM_OpCvt32to32F(v)
case OpCvt32to64F:
- return rewriteValueARM_OpCvt32to64F(v, config)
+ return rewriteValueARM_OpCvt32to64F(v)
case OpCvt64Fto32:
- return rewriteValueARM_OpCvt64Fto32(v, config)
+ return rewriteValueARM_OpCvt64Fto32(v)
case OpCvt64Fto32F:
- return rewriteValueARM_OpCvt64Fto32F(v, config)
+ return rewriteValueARM_OpCvt64Fto32F(v)
case OpCvt64Fto32U:
- return rewriteValueARM_OpCvt64Fto32U(v, config)
+ return rewriteValueARM_OpCvt64Fto32U(v)
case OpDiv16:
- return rewriteValueARM_OpDiv16(v, config)
+ return rewriteValueARM_OpDiv16(v)
case OpDiv16u:
- return rewriteValueARM_OpDiv16u(v, config)
+ return rewriteValueARM_OpDiv16u(v)
case OpDiv32:
- return rewriteValueARM_OpDiv32(v, config)
+ return rewriteValueARM_OpDiv32(v)
case OpDiv32F:
- return rewriteValueARM_OpDiv32F(v, config)
+ return rewriteValueARM_OpDiv32F(v)
case OpDiv32u:
- return rewriteValueARM_OpDiv32u(v, config)
+ return rewriteValueARM_OpDiv32u(v)
case OpDiv64F:
- return rewriteValueARM_OpDiv64F(v, config)
+ return rewriteValueARM_OpDiv64F(v)
case OpDiv8:
- return rewriteValueARM_OpDiv8(v, config)
+ return rewriteValueARM_OpDiv8(v)
case OpDiv8u:
- return rewriteValueARM_OpDiv8u(v, config)
+ return rewriteValueARM_OpDiv8u(v)
case OpEq16:
- return rewriteValueARM_OpEq16(v, config)
+ return rewriteValueARM_OpEq16(v)
case OpEq32:
- return rewriteValueARM_OpEq32(v, config)
+ return rewriteValueARM_OpEq32(v)
case OpEq32F:
- return rewriteValueARM_OpEq32F(v, config)
+ return rewriteValueARM_OpEq32F(v)
case OpEq64F:
- return rewriteValueARM_OpEq64F(v, config)
+ return rewriteValueARM_OpEq64F(v)
case OpEq8:
- return rewriteValueARM_OpEq8(v, config)
+ return rewriteValueARM_OpEq8(v)
case OpEqB:
- return rewriteValueARM_OpEqB(v, config)
+ return rewriteValueARM_OpEqB(v)
case OpEqPtr:
- return rewriteValueARM_OpEqPtr(v, config)
+ return rewriteValueARM_OpEqPtr(v)
case OpGeq16:
- return rewriteValueARM_OpGeq16(v, config)
+ return rewriteValueARM_OpGeq16(v)
case OpGeq16U:
- return rewriteValueARM_OpGeq16U(v, config)
+ return rewriteValueARM_OpGeq16U(v)
case OpGeq32:
- return rewriteValueARM_OpGeq32(v, config)
+ return rewriteValueARM_OpGeq32(v)
case OpGeq32F:
- return rewriteValueARM_OpGeq32F(v, config)
+ return rewriteValueARM_OpGeq32F(v)
case OpGeq32U:
- return rewriteValueARM_OpGeq32U(v, config)
+ return rewriteValueARM_OpGeq32U(v)
case OpGeq64F:
- return rewriteValueARM_OpGeq64F(v, config)
+ return rewriteValueARM_OpGeq64F(v)
case OpGeq8:
- return rewriteValueARM_OpGeq8(v, config)
+ return rewriteValueARM_OpGeq8(v)
case OpGeq8U:
- return rewriteValueARM_OpGeq8U(v, config)
+ return rewriteValueARM_OpGeq8U(v)
case OpGetClosurePtr:
- return rewriteValueARM_OpGetClosurePtr(v, config)
+ return rewriteValueARM_OpGetClosurePtr(v)
case OpGreater16:
- return rewriteValueARM_OpGreater16(v, config)
+ return rewriteValueARM_OpGreater16(v)
case OpGreater16U:
- return rewriteValueARM_OpGreater16U(v, config)
+ return rewriteValueARM_OpGreater16U(v)
case OpGreater32:
- return rewriteValueARM_OpGreater32(v, config)
+ return rewriteValueARM_OpGreater32(v)
case OpGreater32F:
- return rewriteValueARM_OpGreater32F(v, config)
+ return rewriteValueARM_OpGreater32F(v)
case OpGreater32U:
- return rewriteValueARM_OpGreater32U(v, config)
+ return rewriteValueARM_OpGreater32U(v)
case OpGreater64F:
- return rewriteValueARM_OpGreater64F(v, config)
+ return rewriteValueARM_OpGreater64F(v)
case OpGreater8:
- return rewriteValueARM_OpGreater8(v, config)
+ return rewriteValueARM_OpGreater8(v)
case OpGreater8U:
- return rewriteValueARM_OpGreater8U(v, config)
+ return rewriteValueARM_OpGreater8U(v)
case OpHmul32:
- return rewriteValueARM_OpHmul32(v, config)
+ return rewriteValueARM_OpHmul32(v)
case OpHmul32u:
- return rewriteValueARM_OpHmul32u(v, config)
+ return rewriteValueARM_OpHmul32u(v)
case OpInterCall:
- return rewriteValueARM_OpInterCall(v, config)
+ return rewriteValueARM_OpInterCall(v)
case OpIsInBounds:
- return rewriteValueARM_OpIsInBounds(v, config)
+ return rewriteValueARM_OpIsInBounds(v)
case OpIsNonNil:
- return rewriteValueARM_OpIsNonNil(v, config)
+ return rewriteValueARM_OpIsNonNil(v)
case OpIsSliceInBounds:
- return rewriteValueARM_OpIsSliceInBounds(v, config)
+ return rewriteValueARM_OpIsSliceInBounds(v)
case OpLeq16:
- return rewriteValueARM_OpLeq16(v, config)
+ return rewriteValueARM_OpLeq16(v)
case OpLeq16U:
- return rewriteValueARM_OpLeq16U(v, config)
+ return rewriteValueARM_OpLeq16U(v)
case OpLeq32:
- return rewriteValueARM_OpLeq32(v, config)
+ return rewriteValueARM_OpLeq32(v)
case OpLeq32F:
- return rewriteValueARM_OpLeq32F(v, config)
+ return rewriteValueARM_OpLeq32F(v)
case OpLeq32U:
- return rewriteValueARM_OpLeq32U(v, config)
+ return rewriteValueARM_OpLeq32U(v)
case OpLeq64F:
- return rewriteValueARM_OpLeq64F(v, config)
+ return rewriteValueARM_OpLeq64F(v)
case OpLeq8:
- return rewriteValueARM_OpLeq8(v, config)
+ return rewriteValueARM_OpLeq8(v)
case OpLeq8U:
- return rewriteValueARM_OpLeq8U(v, config)
+ return rewriteValueARM_OpLeq8U(v)
case OpLess16:
- return rewriteValueARM_OpLess16(v, config)
+ return rewriteValueARM_OpLess16(v)
case OpLess16U:
- return rewriteValueARM_OpLess16U(v, config)
+ return rewriteValueARM_OpLess16U(v)
case OpLess32:
- return rewriteValueARM_OpLess32(v, config)
+ return rewriteValueARM_OpLess32(v)
case OpLess32F:
- return rewriteValueARM_OpLess32F(v, config)
+ return rewriteValueARM_OpLess32F(v)
case OpLess32U:
- return rewriteValueARM_OpLess32U(v, config)
+ return rewriteValueARM_OpLess32U(v)
case OpLess64F:
- return rewriteValueARM_OpLess64F(v, config)
+ return rewriteValueARM_OpLess64F(v)
case OpLess8:
- return rewriteValueARM_OpLess8(v, config)
+ return rewriteValueARM_OpLess8(v)
case OpLess8U:
- return rewriteValueARM_OpLess8U(v, config)
+ return rewriteValueARM_OpLess8U(v)
case OpLoad:
- return rewriteValueARM_OpLoad(v, config)
+ return rewriteValueARM_OpLoad(v)
case OpLsh16x16:
- return rewriteValueARM_OpLsh16x16(v, config)
+ return rewriteValueARM_OpLsh16x16(v)
case OpLsh16x32:
- return rewriteValueARM_OpLsh16x32(v, config)
+ return rewriteValueARM_OpLsh16x32(v)
case OpLsh16x64:
- return rewriteValueARM_OpLsh16x64(v, config)
+ return rewriteValueARM_OpLsh16x64(v)
case OpLsh16x8:
- return rewriteValueARM_OpLsh16x8(v, config)
+ return rewriteValueARM_OpLsh16x8(v)
case OpLsh32x16:
- return rewriteValueARM_OpLsh32x16(v, config)
+ return rewriteValueARM_OpLsh32x16(v)
case OpLsh32x32:
- return rewriteValueARM_OpLsh32x32(v, config)
+ return rewriteValueARM_OpLsh32x32(v)
case OpLsh32x64:
- return rewriteValueARM_OpLsh32x64(v, config)
+ return rewriteValueARM_OpLsh32x64(v)
case OpLsh32x8:
- return rewriteValueARM_OpLsh32x8(v, config)
+ return rewriteValueARM_OpLsh32x8(v)
case OpLsh8x16:
- return rewriteValueARM_OpLsh8x16(v, config)
+ return rewriteValueARM_OpLsh8x16(v)
case OpLsh8x32:
- return rewriteValueARM_OpLsh8x32(v, config)
+ return rewriteValueARM_OpLsh8x32(v)
case OpLsh8x64:
- return rewriteValueARM_OpLsh8x64(v, config)
+ return rewriteValueARM_OpLsh8x64(v)
case OpLsh8x8:
- return rewriteValueARM_OpLsh8x8(v, config)
+ return rewriteValueARM_OpLsh8x8(v)
case OpMod16:
- return rewriteValueARM_OpMod16(v, config)
+ return rewriteValueARM_OpMod16(v)
case OpMod16u:
- return rewriteValueARM_OpMod16u(v, config)
+ return rewriteValueARM_OpMod16u(v)
case OpMod32:
- return rewriteValueARM_OpMod32(v, config)
+ return rewriteValueARM_OpMod32(v)
case OpMod32u:
- return rewriteValueARM_OpMod32u(v, config)
+ return rewriteValueARM_OpMod32u(v)
case OpMod8:
- return rewriteValueARM_OpMod8(v, config)
+ return rewriteValueARM_OpMod8(v)
case OpMod8u:
- return rewriteValueARM_OpMod8u(v, config)
+ return rewriteValueARM_OpMod8u(v)
case OpMove:
- return rewriteValueARM_OpMove(v, config)
+ return rewriteValueARM_OpMove(v)
case OpMul16:
- return rewriteValueARM_OpMul16(v, config)
+ return rewriteValueARM_OpMul16(v)
case OpMul32:
- return rewriteValueARM_OpMul32(v, config)
+ return rewriteValueARM_OpMul32(v)
case OpMul32F:
- return rewriteValueARM_OpMul32F(v, config)
+ return rewriteValueARM_OpMul32F(v)
case OpMul32uhilo:
- return rewriteValueARM_OpMul32uhilo(v, config)
+ return rewriteValueARM_OpMul32uhilo(v)
case OpMul64F:
- return rewriteValueARM_OpMul64F(v, config)
+ return rewriteValueARM_OpMul64F(v)
case OpMul8:
- return rewriteValueARM_OpMul8(v, config)
+ return rewriteValueARM_OpMul8(v)
case OpNeg16:
- return rewriteValueARM_OpNeg16(v, config)
+ return rewriteValueARM_OpNeg16(v)
case OpNeg32:
- return rewriteValueARM_OpNeg32(v, config)
+ return rewriteValueARM_OpNeg32(v)
case OpNeg32F:
- return rewriteValueARM_OpNeg32F(v, config)
+ return rewriteValueARM_OpNeg32F(v)
case OpNeg64F:
- return rewriteValueARM_OpNeg64F(v, config)
+ return rewriteValueARM_OpNeg64F(v)
case OpNeg8:
- return rewriteValueARM_OpNeg8(v, config)
+ return rewriteValueARM_OpNeg8(v)
case OpNeq16:
- return rewriteValueARM_OpNeq16(v, config)
+ return rewriteValueARM_OpNeq16(v)
case OpNeq32:
- return rewriteValueARM_OpNeq32(v, config)
+ return rewriteValueARM_OpNeq32(v)
case OpNeq32F:
- return rewriteValueARM_OpNeq32F(v, config)
+ return rewriteValueARM_OpNeq32F(v)
case OpNeq64F:
- return rewriteValueARM_OpNeq64F(v, config)
+ return rewriteValueARM_OpNeq64F(v)
case OpNeq8:
- return rewriteValueARM_OpNeq8(v, config)
+ return rewriteValueARM_OpNeq8(v)
case OpNeqB:
- return rewriteValueARM_OpNeqB(v, config)
+ return rewriteValueARM_OpNeqB(v)
case OpNeqPtr:
- return rewriteValueARM_OpNeqPtr(v, config)
+ return rewriteValueARM_OpNeqPtr(v)
case OpNilCheck:
- return rewriteValueARM_OpNilCheck(v, config)
+ return rewriteValueARM_OpNilCheck(v)
case OpNot:
- return rewriteValueARM_OpNot(v, config)
+ return rewriteValueARM_OpNot(v)
case OpOffPtr:
- return rewriteValueARM_OpOffPtr(v, config)
+ return rewriteValueARM_OpOffPtr(v)
case OpOr16:
- return rewriteValueARM_OpOr16(v, config)
+ return rewriteValueARM_OpOr16(v)
case OpOr32:
- return rewriteValueARM_OpOr32(v, config)
+ return rewriteValueARM_OpOr32(v)
case OpOr8:
- return rewriteValueARM_OpOr8(v, config)
+ return rewriteValueARM_OpOr8(v)
case OpOrB:
- return rewriteValueARM_OpOrB(v, config)
+ return rewriteValueARM_OpOrB(v)
case OpRound32F:
- return rewriteValueARM_OpRound32F(v, config)
+ return rewriteValueARM_OpRound32F(v)
case OpRound64F:
- return rewriteValueARM_OpRound64F(v, config)
+ return rewriteValueARM_OpRound64F(v)
case OpRsh16Ux16:
- return rewriteValueARM_OpRsh16Ux16(v, config)
+ return rewriteValueARM_OpRsh16Ux16(v)
case OpRsh16Ux32:
- return rewriteValueARM_OpRsh16Ux32(v, config)
+ return rewriteValueARM_OpRsh16Ux32(v)
case OpRsh16Ux64:
- return rewriteValueARM_OpRsh16Ux64(v, config)
+ return rewriteValueARM_OpRsh16Ux64(v)
case OpRsh16Ux8:
- return rewriteValueARM_OpRsh16Ux8(v, config)
+ return rewriteValueARM_OpRsh16Ux8(v)
case OpRsh16x16:
- return rewriteValueARM_OpRsh16x16(v, config)
+ return rewriteValueARM_OpRsh16x16(v)
case OpRsh16x32:
- return rewriteValueARM_OpRsh16x32(v, config)
+ return rewriteValueARM_OpRsh16x32(v)
case OpRsh16x64:
- return rewriteValueARM_OpRsh16x64(v, config)
+ return rewriteValueARM_OpRsh16x64(v)
case OpRsh16x8:
- return rewriteValueARM_OpRsh16x8(v, config)
+ return rewriteValueARM_OpRsh16x8(v)
case OpRsh32Ux16:
- return rewriteValueARM_OpRsh32Ux16(v, config)
+ return rewriteValueARM_OpRsh32Ux16(v)
case OpRsh32Ux32:
- return rewriteValueARM_OpRsh32Ux32(v, config)
+ return rewriteValueARM_OpRsh32Ux32(v)
case OpRsh32Ux64:
- return rewriteValueARM_OpRsh32Ux64(v, config)
+ return rewriteValueARM_OpRsh32Ux64(v)
case OpRsh32Ux8:
- return rewriteValueARM_OpRsh32Ux8(v, config)
+ return rewriteValueARM_OpRsh32Ux8(v)
case OpRsh32x16:
- return rewriteValueARM_OpRsh32x16(v, config)
+ return rewriteValueARM_OpRsh32x16(v)
case OpRsh32x32:
- return rewriteValueARM_OpRsh32x32(v, config)
+ return rewriteValueARM_OpRsh32x32(v)
case OpRsh32x64:
- return rewriteValueARM_OpRsh32x64(v, config)
+ return rewriteValueARM_OpRsh32x64(v)
case OpRsh32x8:
- return rewriteValueARM_OpRsh32x8(v, config)
+ return rewriteValueARM_OpRsh32x8(v)
case OpRsh8Ux16:
- return rewriteValueARM_OpRsh8Ux16(v, config)
+ return rewriteValueARM_OpRsh8Ux16(v)
case OpRsh8Ux32:
- return rewriteValueARM_OpRsh8Ux32(v, config)
+ return rewriteValueARM_OpRsh8Ux32(v)
case OpRsh8Ux64:
- return rewriteValueARM_OpRsh8Ux64(v, config)
+ return rewriteValueARM_OpRsh8Ux64(v)
case OpRsh8Ux8:
- return rewriteValueARM_OpRsh8Ux8(v, config)
+ return rewriteValueARM_OpRsh8Ux8(v)
case OpRsh8x16:
- return rewriteValueARM_OpRsh8x16(v, config)
+ return rewriteValueARM_OpRsh8x16(v)
case OpRsh8x32:
- return rewriteValueARM_OpRsh8x32(v, config)
+ return rewriteValueARM_OpRsh8x32(v)
case OpRsh8x64:
- return rewriteValueARM_OpRsh8x64(v, config)
+ return rewriteValueARM_OpRsh8x64(v)
case OpRsh8x8:
- return rewriteValueARM_OpRsh8x8(v, config)
+ return rewriteValueARM_OpRsh8x8(v)
case OpSelect0:
- return rewriteValueARM_OpSelect0(v, config)
+ return rewriteValueARM_OpSelect0(v)
case OpSelect1:
- return rewriteValueARM_OpSelect1(v, config)
+ return rewriteValueARM_OpSelect1(v)
case OpSignExt16to32:
- return rewriteValueARM_OpSignExt16to32(v, config)
+ return rewriteValueARM_OpSignExt16to32(v)
case OpSignExt8to16:
- return rewriteValueARM_OpSignExt8to16(v, config)
+ return rewriteValueARM_OpSignExt8to16(v)
case OpSignExt8to32:
- return rewriteValueARM_OpSignExt8to32(v, config)
+ return rewriteValueARM_OpSignExt8to32(v)
case OpSignmask:
- return rewriteValueARM_OpSignmask(v, config)
+ return rewriteValueARM_OpSignmask(v)
case OpSlicemask:
- return rewriteValueARM_OpSlicemask(v, config)
+ return rewriteValueARM_OpSlicemask(v)
case OpSqrt:
- return rewriteValueARM_OpSqrt(v, config)
+ return rewriteValueARM_OpSqrt(v)
case OpStaticCall:
- return rewriteValueARM_OpStaticCall(v, config)
+ return rewriteValueARM_OpStaticCall(v)
case OpStore:
- return rewriteValueARM_OpStore(v, config)
+ return rewriteValueARM_OpStore(v)
case OpSub16:
- return rewriteValueARM_OpSub16(v, config)
+ return rewriteValueARM_OpSub16(v)
case OpSub32:
- return rewriteValueARM_OpSub32(v, config)
+ return rewriteValueARM_OpSub32(v)
case OpSub32F:
- return rewriteValueARM_OpSub32F(v, config)
+ return rewriteValueARM_OpSub32F(v)
case OpSub32carry:
- return rewriteValueARM_OpSub32carry(v, config)
+ return rewriteValueARM_OpSub32carry(v)
case OpSub32withcarry:
- return rewriteValueARM_OpSub32withcarry(v, config)
+ return rewriteValueARM_OpSub32withcarry(v)
case OpSub64F:
- return rewriteValueARM_OpSub64F(v, config)
+ return rewriteValueARM_OpSub64F(v)
case OpSub8:
- return rewriteValueARM_OpSub8(v, config)
+ return rewriteValueARM_OpSub8(v)
case OpSubPtr:
- return rewriteValueARM_OpSubPtr(v, config)
+ return rewriteValueARM_OpSubPtr(v)
case OpTrunc16to8:
- return rewriteValueARM_OpTrunc16to8(v, config)
+ return rewriteValueARM_OpTrunc16to8(v)
case OpTrunc32to16:
- return rewriteValueARM_OpTrunc32to16(v, config)
+ return rewriteValueARM_OpTrunc32to16(v)
case OpTrunc32to8:
- return rewriteValueARM_OpTrunc32to8(v, config)
+ return rewriteValueARM_OpTrunc32to8(v)
case OpXor16:
- return rewriteValueARM_OpXor16(v, config)
+ return rewriteValueARM_OpXor16(v)
case OpXor32:
- return rewriteValueARM_OpXor32(v, config)
+ return rewriteValueARM_OpXor32(v)
case OpXor8:
- return rewriteValueARM_OpXor8(v, config)
+ return rewriteValueARM_OpXor8(v)
case OpZero:
- return rewriteValueARM_OpZero(v, config)
+ return rewriteValueARM_OpZero(v)
case OpZeroExt16to32:
- return rewriteValueARM_OpZeroExt16to32(v, config)
+ return rewriteValueARM_OpZeroExt16to32(v)
case OpZeroExt8to16:
- return rewriteValueARM_OpZeroExt8to16(v, config)
+ return rewriteValueARM_OpZeroExt8to16(v)
case OpZeroExt8to32:
- return rewriteValueARM_OpZeroExt8to32(v, config)
+ return rewriteValueARM_OpZeroExt8to32(v)
case OpZeromask:
- return rewriteValueARM_OpZeromask(v, config)
+ return rewriteValueARM_OpZeromask(v)
}
return false
}
-func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMADC(v *Value) bool {
// match: (ADC (MOVWconst [c]) x flags)
// cond:
// result: (ADCconst [c] x flags)
}
return false
}
-func rewriteValueARM_OpARMADCconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMADCconst(v *Value) bool {
// match: (ADCconst [c] (ADDconst [d] x) flags)
// cond:
// result: (ADCconst [int64(int32(c+d))] x flags)
}
return false
}
-func rewriteValueARM_OpARMADCshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADCshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (ADCshiftLL (MOVWconst [c]) x [d] flags)
}
return false
}
-func rewriteValueARM_OpARMADCshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADCshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (ADCshiftLLreg (MOVWconst [c]) x y flags)
}
return false
}
-func rewriteValueARM_OpARMADCshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADCshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (ADCshiftRA (MOVWconst [c]) x [d] flags)
}
return false
}
-func rewriteValueARM_OpARMADCshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (ADCshiftRAreg (MOVWconst [c]) x y flags)
}
return false
}
-func rewriteValueARM_OpARMADCshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADCshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (ADCshiftRL (MOVWconst [c]) x [d] flags)
}
return false
}
-func rewriteValueARM_OpARMADCshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (ADCshiftRLreg (MOVWconst [c]) x y flags)
}
return false
}
-func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADD(v *Value) bool {
b := v.Block
_ = b
// match: (ADD (MOVWconst [c]) x)
}
return false
}
-func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMADDS(v *Value) bool {
// match: (ADDS (MOVWconst [c]) x)
// cond:
// result: (ADDSconst [c] x)
}
return false
}
-func rewriteValueARM_OpARMADDSshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (ADDSshiftLL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMADDSshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (ADDSshiftLLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMADDSshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (ADDSshiftRA (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMADDSshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (ADDSshiftRAreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMADDSshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (ADDSshiftRL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMADDSshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (ADDSshiftRLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMADDconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMADDconst(v *Value) bool {
// match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
// cond:
// result: (MOVWaddr [off1+off2] {sym} ptr)
}
return false
}
-func rewriteValueARM_OpARMADDshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (ADDshiftLL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMADDshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (ADDshiftLLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMADDshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (ADDshiftRA (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMADDshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (ADDshiftRAreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMADDshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (ADDshiftRL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMADDshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (ADDshiftRLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMAND(v *Value) bool {
// match: (AND (MOVWconst [c]) x)
// cond:
// result: (ANDconst [c] x)
}
return false
}
-func rewriteValueARM_OpARMANDconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMANDconst(v *Value) bool {
// match: (ANDconst [0] _)
// cond:
// result: (MOVWconst [0])
}
return false
}
-func rewriteValueARM_OpARMANDshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (ANDshiftLL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMANDshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (ANDshiftLLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMANDshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (ANDshiftRA (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMANDshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (ANDshiftRAreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMANDshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (ANDshiftRL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMANDshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (ANDshiftRLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMBIC(v *Value) bool {
// match: (BIC x (MOVWconst [c]))
// cond:
// result: (BICconst [c] x)
}
return false
}
-func rewriteValueARM_OpARMBICconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMBICconst(v *Value) bool {
// match: (BICconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueARM_OpARMBICshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMBICshiftLL(v *Value) bool {
// match: (BICshiftLL x (MOVWconst [c]) [d])
// cond:
// result: (BICconst x [int64(uint32(c)<<uint64(d))])
}
return false
}
-func rewriteValueARM_OpARMBICshiftLLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMBICshiftLLreg(v *Value) bool {
// match: (BICshiftLLreg x y (MOVWconst [c]))
// cond:
// result: (BICshiftLL x y [c])
}
return false
}
-func rewriteValueARM_OpARMBICshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMBICshiftRA(v *Value) bool {
// match: (BICshiftRA x (MOVWconst [c]) [d])
// cond:
// result: (BICconst x [int64(int32(c)>>uint64(d))])
}
return false
}
-func rewriteValueARM_OpARMBICshiftRAreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMBICshiftRAreg(v *Value) bool {
// match: (BICshiftRAreg x y (MOVWconst [c]))
// cond:
// result: (BICshiftRA x y [c])
}
return false
}
-func rewriteValueARM_OpARMBICshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMBICshiftRL(v *Value) bool {
// match: (BICshiftRL x (MOVWconst [c]) [d])
// cond:
// result: (BICconst x [int64(uint32(c)>>uint64(d))])
}
return false
}
-func rewriteValueARM_OpARMBICshiftRLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMBICshiftRLreg(v *Value) bool {
// match: (BICshiftRLreg x y (MOVWconst [c]))
// cond:
// result: (BICshiftRL x y [c])
}
return false
}
-func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool {
// match: (CMOVWHSconst _ (FlagEQ) [c])
// cond:
// result: (MOVWconst [c])
}
return false
}
-func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool {
// match: (CMOVWLSconst _ (FlagEQ) [c])
// cond:
// result: (MOVWconst [c])
}
return false
}
-func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMP(v *Value) bool {
b := v.Block
_ = b
// match: (CMP x (MOVWconst [c]))
}
return false
}
-func rewriteValueARM_OpARMCMPD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMCMPD(v *Value) bool {
// match: (CMPD x (MOVDconst [0]))
// cond:
// result: (CMPD0 x)
}
return false
}
-func rewriteValueARM_OpARMCMPF(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMCMPF(v *Value) bool {
// match: (CMPF x (MOVFconst [0]))
// cond:
// result: (CMPF0 x)
}
return false
}
-func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMCMPconst(v *Value) bool {
// match: (CMPconst (MOVWconst [x]) [y])
// cond: int32(x)==int32(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValueARM_OpARMCMPshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMPshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (CMPshiftLL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMCMPshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMPshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (CMPshiftLLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMCMPshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMPshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (CMPshiftRA (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMCMPshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (CMPshiftRAreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMCMPshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMPshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (CMPshiftRL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMCMPshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (CMPshiftRLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMEqual(v *Value) bool {
// match: (Equal (FlagEQ))
// cond:
// result: (MOVWconst [1])
}
return false
}
-func rewriteValueARM_OpARMGreaterEqual(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMGreaterEqual(v *Value) bool {
// match: (GreaterEqual (FlagEQ))
// cond:
// result: (MOVWconst [1])
}
return false
}
-func rewriteValueARM_OpARMGreaterEqualU(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMGreaterEqualU(v *Value) bool {
// match: (GreaterEqualU (FlagEQ))
// cond:
// result: (MOVWconst [1])
}
return false
}
-func rewriteValueARM_OpARMGreaterThan(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMGreaterThan(v *Value) bool {
// match: (GreaterThan (FlagEQ))
// cond:
// result: (MOVWconst [0])
}
return false
}
-func rewriteValueARM_OpARMGreaterThanU(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMGreaterThanU(v *Value) bool {
// match: (GreaterThanU (FlagEQ))
// cond:
// result: (MOVWconst [0])
}
return false
}
-func rewriteValueARM_OpARMLessEqual(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMLessEqual(v *Value) bool {
// match: (LessEqual (FlagEQ))
// cond:
// result: (MOVWconst [1])
}
return false
}
-func rewriteValueARM_OpARMLessEqualU(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMLessEqualU(v *Value) bool {
// match: (LessEqualU (FlagEQ))
// cond:
// result: (MOVWconst [1])
}
return false
}
-func rewriteValueARM_OpARMLessThan(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMLessThan(v *Value) bool {
// match: (LessThan (FlagEQ))
// cond:
// result: (MOVWconst [0])
}
return false
}
-func rewriteValueARM_OpARMLessThanU(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMLessThanU(v *Value) bool {
// match: (LessThanU (FlagEQ))
// cond:
// result: (MOVWconst [0])
}
return false
}
-func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
// match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond:
// result: (MOVBUload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM_OpARMMOVBUreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVBUreg(v *Value) bool {
// match: (MOVBUreg x:(MOVBUload _ _))
// cond:
// result: (MOVWreg x)
}
return false
}
-func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVBload(v *Value) bool {
// match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond:
// result: (MOVBload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM_OpARMMOVBreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVBreg(v *Value) bool {
// match: (MOVBreg x:(MOVBload _ _))
// cond:
// result: (MOVWreg x)
}
return false
}
-func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
// match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond:
// result: (MOVBstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueARM_OpARMMOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVDload(v *Value) bool {
// match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond:
// result: (MOVDload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM_OpARMMOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVDstore(v *Value) bool {
// match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond:
// result: (MOVDstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueARM_OpARMMOVFload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVFload(v *Value) bool {
// match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond:
// result: (MOVFload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM_OpARMMOVFstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVFstore(v *Value) bool {
// match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond:
// result: (MOVFstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
// match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond:
// result: (MOVHUload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM_OpARMMOVHUreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVHUreg(v *Value) bool {
// match: (MOVHUreg x:(MOVBUload _ _))
// cond:
// result: (MOVWreg x)
}
return false
}
-func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVHload(v *Value) bool {
// match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond:
// result: (MOVHload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM_OpARMMOVHreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVHreg(v *Value) bool {
// match: (MOVHreg x:(MOVBload _ _))
// cond:
// result: (MOVWreg x)
}
return false
}
-func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVHstore(v *Value) bool {
// match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond:
// result: (MOVHstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWload(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond:
// result: (MOVWload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
// match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _))
// cond: isSamePtr(ptr, ptr2)
// result: x
}
return false
}
-func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool {
// match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _))
// cond: c==d && isSamePtr(ptr, ptr2)
// result: x
}
return false
}
-func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value) bool {
// match: (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _))
// cond: c==d && isSamePtr(ptr, ptr2)
// result: x
}
return false
}
-func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool {
// match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _))
// cond: c==d && isSamePtr(ptr, ptr2)
// result: x
}
return false
}
-func rewriteValueARM_OpARMMOVWreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVWreg(v *Value) bool {
// match: (MOVWreg x)
// cond: x.Uses == 1
// result: (MOVWnop x)
}
return false
}
-func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond:
// result: (MOVWstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool {
// match: (MOVWstoreidx ptr (MOVWconst [c]) val mem)
// cond:
// result: (MOVWstore [c] ptr val mem)
}
return false
}
-func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value) bool {
// match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem)
// cond:
// result: (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
}
return false
}
-func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value) bool {
// match: (MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem)
// cond:
// result: (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
}
return false
}
-func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value) bool {
// match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem)
// cond:
// result: (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
}
return false
}
-func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMUL(v *Value) bool {
b := v.Block
_ = b
// match: (MUL x (MOVWconst [c]))
}
return false
}
-func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMULA(v *Value) bool {
b := v.Block
_ = b
// match: (MULA x (MOVWconst [c]) a)
}
return false
}
-func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMVN(v *Value) bool {
// match: (MVN (MOVWconst [c]))
// cond:
// result: (MOVWconst [^c])
}
return false
}
-func rewriteValueARM_OpARMMVNshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMVNshiftLL(v *Value) bool {
// match: (MVNshiftLL (MOVWconst [c]) [d])
// cond:
// result: (MOVWconst [^int64(uint32(c)<<uint64(d))])
}
return false
}
-func rewriteValueARM_OpARMMVNshiftLLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMVNshiftLLreg(v *Value) bool {
// match: (MVNshiftLLreg x (MOVWconst [c]))
// cond:
// result: (MVNshiftLL x [c])
}
return false
}
-func rewriteValueARM_OpARMMVNshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMVNshiftRA(v *Value) bool {
// match: (MVNshiftRA (MOVWconst [c]) [d])
// cond:
// result: (MOVWconst [^int64(int32(c)>>uint64(d))])
}
return false
}
-func rewriteValueARM_OpARMMVNshiftRAreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMVNshiftRAreg(v *Value) bool {
// match: (MVNshiftRAreg x (MOVWconst [c]))
// cond:
// result: (MVNshiftRA x [c])
}
return false
}
-func rewriteValueARM_OpARMMVNshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMVNshiftRL(v *Value) bool {
// match: (MVNshiftRL (MOVWconst [c]) [d])
// cond:
// result: (MOVWconst [^int64(uint32(c)>>uint64(d))])
}
return false
}
-func rewriteValueARM_OpARMMVNshiftRLreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMMVNshiftRLreg(v *Value) bool {
// match: (MVNshiftRLreg x (MOVWconst [c]))
// cond:
// result: (MVNshiftRL x [c])
}
return false
}
-func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMNotEqual(v *Value) bool {
// match: (NotEqual (FlagEQ))
// cond:
// result: (MOVWconst [0])
}
return false
}
-func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMOR(v *Value) bool {
// match: (OR (MOVWconst [c]) x)
// cond:
// result: (ORconst [c] x)
}
return false
}
-func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMORconst(v *Value) bool {
// match: (ORconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMORshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (ORshiftLL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (ORshiftLLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMORshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (ORshiftRA (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (ORshiftRAreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMORshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (ORshiftRL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMORshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (ORshiftRLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMRSB(v *Value) bool {
// match: (RSB (MOVWconst [c]) x)
// cond:
// result: (SUBconst [c] x)
}
return false
}
-func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSBSshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (RSBSshiftLL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (RSBSshiftLLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSBSshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (RSBSshiftRA (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (RSBSshiftRAreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSBSshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (RSBSshiftRL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (RSBSshiftRLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMRSBconst(v *Value) bool {
// match: (RSBconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [int64(int32(c-d))])
}
return false
}
-func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSBshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (RSBshiftLL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMRSBshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSBshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (RSBshiftLLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSBshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (RSBshiftRA (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMRSBshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (RSBshiftRAreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSBshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (RSBshiftRL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMRSBshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (RSBshiftRLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMRSCconst(v *Value) bool {
// match: (RSCconst [c] (ADDconst [d] x) flags)
// cond:
// result: (RSCconst [int64(int32(c-d))] x flags)
}
return false
}
-func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSCshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (RSCshiftLL (MOVWconst [c]) x [d] flags)
}
return false
}
-func rewriteValueARM_OpARMRSCshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSCshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (RSCshiftLLreg (MOVWconst [c]) x y flags)
}
return false
}
-func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSCshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (RSCshiftRA (MOVWconst [c]) x [d] flags)
}
return false
}
-func rewriteValueARM_OpARMRSCshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (RSCshiftRAreg (MOVWconst [c]) x y flags)
}
return false
}
-func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSCshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (RSCshiftRL (MOVWconst [c]) x [d] flags)
}
return false
}
-func rewriteValueARM_OpARMRSCshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (RSCshiftRLreg (MOVWconst [c]) x y flags)
}
return false
}
-func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMSBC(v *Value) bool {
// match: (SBC (MOVWconst [c]) x flags)
// cond:
// result: (RSCconst [c] x flags)
}
return false
}
-func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMSBCconst(v *Value) bool {
// match: (SBCconst [c] (ADDconst [d] x) flags)
// cond:
// result: (SBCconst [int64(int32(c-d))] x flags)
}
return false
}
-func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (SBCshiftLL (MOVWconst [c]) x [d] flags)
}
return false
}
-func rewriteValueARM_OpARMSBCshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (SBCshiftLLreg (MOVWconst [c]) x y flags)
}
return false
}
-func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (SBCshiftRA (MOVWconst [c]) x [d] flags)
}
return false
}
-func rewriteValueARM_OpARMSBCshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (SBCshiftRAreg (MOVWconst [c]) x y flags)
}
return false
}
-func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (SBCshiftRL (MOVWconst [c]) x [d] flags)
}
return false
}
-func rewriteValueARM_OpARMSBCshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (SBCshiftRLreg (MOVWconst [c]) x y flags)
}
return false
}
-func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMSLL(v *Value) bool {
// match: (SLL x (MOVWconst [c]))
// cond:
// result: (SLLconst x [c&31])
}
return false
}
-func rewriteValueARM_OpARMSLLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMSLLconst(v *Value) bool {
// match: (SLLconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [int64(uint32(d)<<uint64(c))])
}
return false
}
-func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMSRA(v *Value) bool {
// match: (SRA x (MOVWconst [c]))
// cond:
// result: (SRAconst x [c&31])
}
return false
}
-func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMSRAcond(v *Value) bool {
// match: (SRAcond x _ (FlagEQ))
// cond:
// result: (SRAconst x [31])
}
return false
}
-func rewriteValueARM_OpARMSRAconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMSRAconst(v *Value) bool {
// match: (SRAconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [int64(int32(d)>>uint64(c))])
}
return false
}
-func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMSRL(v *Value) bool {
// match: (SRL x (MOVWconst [c]))
// cond:
// result: (SRLconst x [c&31])
}
return false
}
-func rewriteValueARM_OpARMSRLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMSRLconst(v *Value) bool {
// match: (SRLconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [int64(uint32(d)>>uint64(c))])
}
return false
}
-func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMSUB(v *Value) bool {
// match: (SUB (MOVWconst [c]) x)
// cond:
// result: (RSBconst [c] x)
}
return false
}
-func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMSUBS(v *Value) bool {
// match: (SUBS (MOVWconst [c]) x)
// cond:
// result: (RSBSconst [c] x)
}
return false
}
-func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBSshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (SUBSshiftLL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (SUBSshiftLLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBSshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (SUBSshiftRA (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (SUBSshiftRAreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBSshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (SUBSshiftRL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (SUBSshiftRLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMSUBconst(v *Value) bool {
// match: (SUBconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (SUBshiftLL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMSUBshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (SUBshiftLLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (SUBshiftRA (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMSUBshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (SUBshiftRAreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (SUBshiftRL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMSUBshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (SUBshiftRLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMXOR(v *Value) bool {
// match: (XOR (MOVWconst [c]) x)
// cond:
// result: (XORconst [c] x)
}
return false
}
-func rewriteValueARM_OpARMXORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpARMXORconst(v *Value) bool {
// match: (XORconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (XORshiftLL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMXORshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool {
b := v.Block
_ = b
// match: (XORshiftLLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (XORshiftRA (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMXORshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool {
b := v.Block
_ = b
// match: (XORshiftRAreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (XORshiftRL (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpARMXORshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool {
b := v.Block
_ = b
// match: (XORshiftRLreg (MOVWconst [c]) x y)
}
return false
}
-func rewriteValueARM_OpARMXORshiftRR(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRR(v *Value) bool {
b := v.Block
_ = b
// match: (XORshiftRR (MOVWconst [c]) x [d])
}
return false
}
-func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAdd16(v *Value) bool {
// match: (Add16 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAdd32(v *Value) bool {
// match: (Add32 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAdd32F(v *Value) bool {
// match: (Add32F x y)
// cond:
// result: (ADDF x y)
return true
}
}
-func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAdd32carry(v *Value) bool {
// match: (Add32carry x y)
// cond:
// result: (ADDS x y)
return true
}
}
-func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAdd32withcarry(v *Value) bool {
// match: (Add32withcarry x y c)
// cond:
// result: (ADC x y c)
return true
}
}
-func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAdd64F(v *Value) bool {
// match: (Add64F x y)
// cond:
// result: (ADDD x y)
return true
}
}
-func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAdd8(v *Value) bool {
// match: (Add8 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAddPtr(v *Value) bool {
// match: (AddPtr x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAddr(v *Value) bool {
// match: (Addr {sym} base)
// cond:
// result: (MOVWaddr {sym} base)
return true
}
}
-func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAnd16(v *Value) bool {
// match: (And16 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAnd32(v *Value) bool {
// match: (And32 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAnd8(v *Value) bool {
// match: (And8 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpAndB(v *Value) bool {
// match: (AndB x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueARM_OpAvg32u(v *Value, config *Config) bool {
+func rewriteValueARM_OpAvg32u(v *Value) bool {
b := v.Block
_ = b
// match: (Avg32u <t> x y)
return true
}
}
-func rewriteValueARM_OpBitLen32(v *Value, config *Config) bool {
+func rewriteValueARM_OpBitLen32(v *Value) bool {
b := v.Block
_ = b
// match: (BitLen32 <t> x)
return true
}
}
-func rewriteValueARM_OpBswap32(v *Value, config *Config) bool {
+func rewriteValueARM_OpBswap32(v *Value) bool {
b := v.Block
_ = b
// match: (Bswap32 <t> x)
return true
}
}
-func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpClosureCall(v *Value) bool {
// match: (ClosureCall [argwid] entry closure mem)
// cond:
// result: (CALLclosure [argwid] entry closure mem)
return true
}
}
-func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCom16(v *Value) bool {
// match: (Com16 x)
// cond:
// result: (MVN x)
return true
}
}
-func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCom32(v *Value) bool {
// match: (Com32 x)
// cond:
// result: (MVN x)
return true
}
}
-func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCom8(v *Value) bool {
// match: (Com8 x)
// cond:
// result: (MVN x)
return true
}
}
-func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpConst16(v *Value) bool {
// match: (Const16 [val])
// cond:
// result: (MOVWconst [val])
return true
}
}
-func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpConst32(v *Value) bool {
// match: (Const32 [val])
// cond:
// result: (MOVWconst [val])
return true
}
}
-func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpConst32F(v *Value) bool {
// match: (Const32F [val])
// cond:
// result: (MOVFconst [val])
return true
}
}
-func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpConst64F(v *Value) bool {
// match: (Const64F [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpConst8(v *Value) bool {
// match: (Const8 [val])
// cond:
// result: (MOVWconst [val])
return true
}
}
-func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpConstBool(v *Value) bool {
// match: (ConstBool [b])
// cond:
// result: (MOVWconst [b])
return true
}
}
-func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpConstNil(v *Value) bool {
// match: (ConstNil)
// cond:
// result: (MOVWconst [0])
return true
}
}
-func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpConvert(v *Value) bool {
// match: (Convert x mem)
// cond:
// result: (MOVWconvert x mem)
return true
}
}
-func rewriteValueARM_OpCtz32(v *Value, config *Config) bool {
+func rewriteValueARM_OpCtz32(v *Value) bool {
b := v.Block
_ = b
// match: (Ctz32 <t> x)
return true
}
}
-func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCvt32Fto32(v *Value) bool {
// match: (Cvt32Fto32 x)
// cond:
// result: (MOVFW x)
return true
}
}
-func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCvt32Fto32U(v *Value) bool {
// match: (Cvt32Fto32U x)
// cond:
// result: (MOVFWU x)
return true
}
}
-func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCvt32Fto64F(v *Value) bool {
// match: (Cvt32Fto64F x)
// cond:
// result: (MOVFD x)
return true
}
}
-func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCvt32Uto32F(v *Value) bool {
// match: (Cvt32Uto32F x)
// cond:
// result: (MOVWUF x)
return true
}
}
-func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCvt32Uto64F(v *Value) bool {
// match: (Cvt32Uto64F x)
// cond:
// result: (MOVWUD x)
return true
}
}
-func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCvt32to32F(v *Value) bool {
// match: (Cvt32to32F x)
// cond:
// result: (MOVWF x)
return true
}
}
-func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCvt32to64F(v *Value) bool {
// match: (Cvt32to64F x)
// cond:
// result: (MOVWD x)
return true
}
}
-func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCvt64Fto32(v *Value) bool {
// match: (Cvt64Fto32 x)
// cond:
// result: (MOVDW x)
return true
}
}
-func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCvt64Fto32F(v *Value) bool {
// match: (Cvt64Fto32F x)
// cond:
// result: (MOVDF x)
return true
}
}
-func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpCvt64Fto32U(v *Value) bool {
// match: (Cvt64Fto32U x)
// cond:
// result: (MOVDWU x)
return true
}
}
-func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16 x y)
// cond:
// result: (Div32 (SignExt16to32 x) (SignExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpDiv32)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16u x y)
// cond:
// result: (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpDiv32u)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div32 x y)
// cond:
- // result: (SUB (XOR <config.fe.TypeUInt32()> (Select0 <config.fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} (SUB <config.fe.TypeUInt32()> (XOR x <config.fe.TypeUInt32()> (Signmask x)) (Signmask x)) (SUB <config.fe.TypeUInt32()> (XOR y <config.fe.TypeUInt32()> (Signmask y)) (Signmask y)))) (Signmask (XOR <config.fe.TypeUInt32()> x y))) (Signmask (XOR <config.fe.TypeUInt32()> x y)))
+ // result: (SUB (XOR <fe.TypeUInt32()> (Select0 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} (SUB <fe.TypeUInt32()> (XOR x <fe.TypeUInt32()> (Signmask x)) (Signmask x)) (SUB <fe.TypeUInt32()> (XOR y <fe.TypeUInt32()> (Signmask y)) (Signmask y)))) (Signmask (XOR <fe.TypeUInt32()> x y))) (Signmask (XOR <fe.TypeUInt32()> x y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSUB)
- v0 := b.NewValue0(v.Pos, OpARMXOR, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpSelect0, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0 := b.NewValue0(v.Pos, OpARMXOR, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpSelect0, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
v2.Aux = config.ctxt.Lookup("udiv", 0)
- v3 := b.NewValue0(v.Pos, OpARMSUB, config.fe.TypeUInt32())
- v4 := b.NewValue0(v.Pos, OpARMXOR, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpARMSUB, fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpARMXOR, fe.TypeUInt32())
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
+ v5 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
v5.AddArg(x)
v4.AddArg(v5)
v3.AddArg(v4)
- v6 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
+ v6 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
v6.AddArg(x)
v3.AddArg(v6)
v2.AddArg(v3)
- v7 := b.NewValue0(v.Pos, OpARMSUB, config.fe.TypeUInt32())
- v8 := b.NewValue0(v.Pos, OpARMXOR, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpARMSUB, fe.TypeUInt32())
+ v8 := b.NewValue0(v.Pos, OpARMXOR, fe.TypeUInt32())
v8.AddArg(y)
- v9 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
+ v9 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
v9.AddArg(y)
v8.AddArg(v9)
v7.AddArg(v8)
- v10 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
+ v10 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
v10.AddArg(y)
v7.AddArg(v10)
v2.AddArg(v7)
v1.AddArg(v2)
v0.AddArg(v1)
- v11 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
- v12 := b.NewValue0(v.Pos, OpARMXOR, config.fe.TypeUInt32())
+ v11 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
+ v12 := b.NewValue0(v.Pos, OpARMXOR, fe.TypeUInt32())
v12.AddArg(x)
v12.AddArg(y)
v11.AddArg(v12)
v0.AddArg(v11)
v.AddArg(v0)
- v13 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
- v14 := b.NewValue0(v.Pos, OpARMXOR, config.fe.TypeUInt32())
+ v13 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
+ v14 := b.NewValue0(v.Pos, OpARMXOR, fe.TypeUInt32())
v14.AddArg(x)
v14.AddArg(y)
v13.AddArg(v14)
return true
}
}
-func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpDiv32F(v *Value) bool {
// match: (Div32F x y)
// cond:
// result: (DIVF x y)
return true
}
}
-func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div32u x y)
// cond:
- // result: (Select0 <config.fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
+ // result: (Select0 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v.Type = config.fe.TypeUInt32()
- v0 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v.Type = fe.TypeUInt32()
+ v0 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
v0.Aux = config.ctxt.Lookup("udiv", 0)
v0.AddArg(x)
v0.AddArg(y)
return true
}
}
-func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpDiv64F(v *Value) bool {
// match: (Div64F x y)
// cond:
// result: (DIVD x y)
return true
}
}
-func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8 x y)
// cond:
// result: (Div32 (SignExt8to32 x) (SignExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpDiv32)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8u x y)
// cond:
// result: (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpDiv32u)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
+func rewriteValueARM_OpEq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq16 x y)
// cond:
// result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARMEqual)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
+func rewriteValueARM_OpEq32(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32 x y)
return true
}
}
-func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpEq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32F x y)
return true
}
}
-func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpEq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq64F x y)
return true
}
}
-func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
+func rewriteValueARM_OpEq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq8 x y)
// cond:
// result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpARMEqual)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
+func rewriteValueARM_OpEqB(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (EqB x y)
// cond:
- // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+ // result: (XORconst [1] (XOR <fe.TypeBool()> x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpARMXOR, config.fe.TypeBool())
+ v0 := b.NewValue0(v.Pos, OpARMXOR, fe.TypeBool())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
+func rewriteValueARM_OpEqPtr(v *Value) bool {
b := v.Block
_ = b
// match: (EqPtr x y)
return true
}
}
-func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq16 x y)
// cond:
// result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpARMGreaterEqual)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq16U x y)
// cond:
// result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARMGreaterEqualU)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32 x y)
return true
}
}
-func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32F x y)
return true
}
}
-func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq32U(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32U x y)
return true
}
}
-func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64F x y)
return true
}
}
-func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq8 x y)
// cond:
// result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpARMGreaterEqual)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq8U x y)
// cond:
// result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpARMGreaterEqualU)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpGetClosurePtr(v *Value) bool {
// match: (GetClosurePtr)
// cond:
// result: (LoweredGetClosurePtr)
return true
}
}
-func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
+func rewriteValueARM_OpGreater16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater16 x y)
// cond:
// result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpARMGreaterThan)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
+func rewriteValueARM_OpGreater16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater16U x y)
// cond:
// result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARMGreaterThanU)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
+func rewriteValueARM_OpGreater32(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32 x y)
return true
}
}
-func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpGreater32F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32F x y)
return true
}
}
-func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
+func rewriteValueARM_OpGreater32U(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32U x y)
return true
}
}
-func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpGreater64F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64F x y)
return true
}
}
-func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
+func rewriteValueARM_OpGreater8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater8 x y)
// cond:
// result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpARMGreaterThan)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
+func rewriteValueARM_OpGreater8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater8U x y)
// cond:
// result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpARMGreaterThanU)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpHmul32(v *Value) bool {
// match: (Hmul32 x y)
// cond:
// result: (HMUL x y)
return true
}
}
-func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpHmul32u(v *Value) bool {
// match: (Hmul32u x y)
// cond:
// result: (HMULU x y)
return true
}
}
-func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpInterCall(v *Value) bool {
// match: (InterCall [argwid] entry mem)
// cond:
// result: (CALLinter [argwid] entry mem)
return true
}
}
-func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
+func rewriteValueARM_OpIsInBounds(v *Value) bool {
b := v.Block
_ = b
// match: (IsInBounds idx len)
return true
}
}
-func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
+func rewriteValueARM_OpIsNonNil(v *Value) bool {
b := v.Block
_ = b
// match: (IsNonNil ptr)
return true
}
}
-func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
+func rewriteValueARM_OpIsSliceInBounds(v *Value) bool {
b := v.Block
_ = b
// match: (IsSliceInBounds idx len)
return true
}
}
-func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
+func rewriteValueARM_OpLeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq16 x y)
// cond:
// result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpARMLessEqual)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
+func rewriteValueARM_OpLeq16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq16U x y)
// cond:
// result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARMLessEqualU)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
+func rewriteValueARM_OpLeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32 x y)
return true
}
}
-func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpLeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32F x y)
return true
}
}
-func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
+func rewriteValueARM_OpLeq32U(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32U x y)
return true
}
}
-func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpLeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64F x y)
return true
}
}
-func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
+func rewriteValueARM_OpLeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq8 x y)
// cond:
// result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpARMLessEqual)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
+func rewriteValueARM_OpLeq8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq8U x y)
// cond:
// result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpARMLessEqualU)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
+func rewriteValueARM_OpLess16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less16 x y)
// cond:
// result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpARMLessThan)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
+func rewriteValueARM_OpLess16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less16U x y)
// cond:
// result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARMLessThanU)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
+func rewriteValueARM_OpLess32(v *Value) bool {
b := v.Block
_ = b
// match: (Less32 x y)
return true
}
}
-func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpLess32F(v *Value) bool {
b := v.Block
_ = b
// match: (Less32F x y)
return true
}
}
-func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
+func rewriteValueARM_OpLess32U(v *Value) bool {
b := v.Block
_ = b
// match: (Less32U x y)
return true
}
}
-func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpLess64F(v *Value) bool {
b := v.Block
_ = b
// match: (Less64F x y)
return true
}
}
-func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
+func rewriteValueARM_OpLess8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less8 x y)
// cond:
// result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpARMLessThan)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
+func rewriteValueARM_OpLess8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less8U x y)
// cond:
// result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpARMLessThanU)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpLoad(v *Value) bool {
// match: (Load <t> ptr mem)
// cond: t.IsBoolean()
// result: (MOVBUload ptr mem)
}
return false
}
-func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh16x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x16 x y)
// cond:
// result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
v2.AuxInt = 256
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
return true
}
}
-func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh16x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x32 x y)
return true
}
}
-func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpLsh16x64(v *Value) bool {
// match: (Lsh16x64 x (Const64 [c]))
// cond: uint64(c) < 16
// result: (SLLconst x [c])
}
return false
}
-func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh16x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x8 x y)
// cond:
// result: (SLL x (ZeroExt8to32 y))
y := v.Args[1]
v.reset(OpARMSLL)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh32x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x16 x y)
// cond:
// result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
v2.AuxInt = 256
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
return true
}
}
-func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh32x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x32 x y)
return true
}
}
-func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpLsh32x64(v *Value) bool {
// match: (Lsh32x64 x (Const64 [c]))
// cond: uint64(c) < 32
// result: (SLLconst x [c])
}
return false
}
-func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh32x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x8 x y)
// cond:
// result: (SLL x (ZeroExt8to32 y))
y := v.Args[1]
v.reset(OpARMSLL)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh8x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x16 x y)
// cond:
// result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
v2.AuxInt = 256
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
return true
}
}
-func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh8x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x32 x y)
return true
}
}
-func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpLsh8x64(v *Value) bool {
// match: (Lsh8x64 x (Const64 [c]))
// cond: uint64(c) < 8
// result: (SLLconst x [c])
}
return false
}
-func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh8x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x8 x y)
// cond:
// result: (SLL x (ZeroExt8to32 y))
y := v.Args[1]
v.reset(OpARMSLL)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
+func rewriteValueARM_OpMod16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16 x y)
// cond:
// result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
+func rewriteValueARM_OpMod16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16u x y)
// cond:
// result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32u)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
+func rewriteValueARM_OpMod32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod32 x y)
// cond:
- // result: (SUB (XOR <config.fe.TypeUInt32()> (Select1 <config.fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} (SUB <config.fe.TypeUInt32()> (XOR <config.fe.TypeUInt32()> x (Signmask x)) (Signmask x)) (SUB <config.fe.TypeUInt32()> (XOR <config.fe.TypeUInt32()> y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x))
+ // result: (SUB (XOR <fe.TypeUInt32()> (Select1 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} (SUB <fe.TypeUInt32()> (XOR <fe.TypeUInt32()> x (Signmask x)) (Signmask x)) (SUB <fe.TypeUInt32()> (XOR <fe.TypeUInt32()> y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSUB)
- v0 := b.NewValue0(v.Pos, OpARMXOR, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpSelect1, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0 := b.NewValue0(v.Pos, OpARMXOR, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpSelect1, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
v2.Aux = config.ctxt.Lookup("udiv", 0)
- v3 := b.NewValue0(v.Pos, OpARMSUB, config.fe.TypeUInt32())
- v4 := b.NewValue0(v.Pos, OpARMXOR, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpARMSUB, fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpARMXOR, fe.TypeUInt32())
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
+ v5 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
v5.AddArg(x)
v4.AddArg(v5)
v3.AddArg(v4)
- v6 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
+ v6 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
v6.AddArg(x)
v3.AddArg(v6)
v2.AddArg(v3)
- v7 := b.NewValue0(v.Pos, OpARMSUB, config.fe.TypeUInt32())
- v8 := b.NewValue0(v.Pos, OpARMXOR, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpARMSUB, fe.TypeUInt32())
+ v8 := b.NewValue0(v.Pos, OpARMXOR, fe.TypeUInt32())
v8.AddArg(y)
- v9 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
+ v9 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
v9.AddArg(y)
v8.AddArg(v9)
v7.AddArg(v8)
- v10 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
+ v10 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
v10.AddArg(y)
v7.AddArg(v10)
v2.AddArg(v7)
v1.AddArg(v2)
v0.AddArg(v1)
- v11 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
+ v11 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
v11.AddArg(x)
v0.AddArg(v11)
v.AddArg(v0)
- v12 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
+ v12 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
v12.AddArg(x)
v.AddArg(v12)
return true
}
}
-func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
+func rewriteValueARM_OpMod32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod32u x y)
// cond:
- // result: (Select1 <config.fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
+ // result: (Select1 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v.Type = config.fe.TypeUInt32()
- v0 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v.Type = fe.TypeUInt32()
+ v0 := b.NewValue0(v.Pos, OpARMCALLudiv, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
v0.Aux = config.ctxt.Lookup("udiv", 0)
v0.AddArg(x)
v0.AddArg(y)
return true
}
}
-func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
+func rewriteValueARM_OpMod8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8 x y)
// cond:
// result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
+func rewriteValueARM_OpMod8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8u x y)
// cond:
// result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32u)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpMove(v *Value, config *Config) bool {
+func rewriteValueARM_OpMove(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Move [0] _ _ mem)
// cond:
// result: mem
mem := v.Args[2]
v.reset(OpARMMOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, fe.TypeUInt8())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
}
v.reset(OpARMMOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVHUload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpARMMOVHUload, fe.TypeUInt16())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpARMMOVBstore)
v.AuxInt = 1
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, fe.TypeUInt8())
v0.AuxInt = 1
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARMMOVBUload, config.fe.TypeUInt8())
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, fe.TypeUInt8())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
}
v.reset(OpARMMOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVWload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMMOVWload, fe.TypeUInt32())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpARMMOVHstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVHUload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpARMMOVHUload, fe.TypeUInt16())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARMMOVHstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARMMOVHUload, config.fe.TypeUInt16())
+ v2 := b.NewValue0(v.Pos, OpARMMOVHUload, fe.TypeUInt16())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpARMMOVBstore)
v.AuxInt = 3
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, fe.TypeUInt8())
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARMMOVBUload, config.fe.TypeUInt8())
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, fe.TypeUInt8())
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v3 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
v3.AuxInt = 1
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpARMMOVBUload, config.fe.TypeUInt8())
+ v4 := b.NewValue0(v.Pos, OpARMMOVBUload, fe.TypeUInt8())
v4.AuxInt = 1
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpARMMOVBUload, config.fe.TypeUInt8())
+ v6 := b.NewValue0(v.Pos, OpARMMOVBUload, fe.TypeUInt8())
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
v.reset(OpARMMOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARMMOVBUload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, fe.TypeUInt8())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
v1.AuxInt = 1
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARMMOVBUload, config.fe.TypeUInt8())
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, fe.TypeUInt8())
v2.AuxInt = 1
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpARMMOVBUload, config.fe.TypeUInt8())
+ v4 := b.NewValue0(v.Pos, OpARMMOVBUload, fe.TypeUInt8())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
}
return false
}
-func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpMul16(v *Value) bool {
// match: (Mul16 x y)
// cond:
// result: (MUL x y)
return true
}
}
-func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpMul32(v *Value) bool {
// match: (Mul32 x y)
// cond:
// result: (MUL x y)
return true
}
}
-func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpMul32F(v *Value) bool {
// match: (Mul32F x y)
// cond:
// result: (MULF x y)
return true
}
}
-func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpMul32uhilo(v *Value) bool {
// match: (Mul32uhilo x y)
// cond:
// result: (MULLU x y)
return true
}
}
-func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpMul64F(v *Value) bool {
// match: (Mul64F x y)
// cond:
// result: (MULD x y)
return true
}
}
-func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpMul8(v *Value) bool {
// match: (Mul8 x y)
// cond:
// result: (MUL x y)
return true
}
}
-func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpNeg16(v *Value) bool {
// match: (Neg16 x)
// cond:
// result: (RSBconst [0] x)
return true
}
}
-func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpNeg32(v *Value) bool {
// match: (Neg32 x)
// cond:
// result: (RSBconst [0] x)
return true
}
}
-func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpNeg32F(v *Value) bool {
// match: (Neg32F x)
// cond:
// result: (NEGF x)
return true
}
}
-func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpNeg64F(v *Value) bool {
// match: (Neg64F x)
// cond:
// result: (NEGD x)
return true
}
}
-func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpNeg8(v *Value) bool {
// match: (Neg8 x)
// cond:
// result: (RSBconst [0] x)
return true
}
}
-func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq16 x y)
// cond:
// result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARMNotEqual)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32 x y)
return true
}
}
-func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32F x y)
return true
}
}
-func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq64F x y)
return true
}
}
-func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq8 x y)
// cond:
// result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpARMNotEqual)
v0 := b.NewValue0(v.Pos, OpARMCMP, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpNeqB(v *Value) bool {
// match: (NeqB x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeqPtr(v *Value) bool {
b := v.Block
_ = b
// match: (NeqPtr x y)
return true
}
}
-func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpNilCheck(v *Value) bool {
// match: (NilCheck ptr mem)
// cond:
// result: (LoweredNilCheck ptr mem)
return true
}
}
-func rewriteValueARM_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpNot(v *Value) bool {
// match: (Not x)
// cond:
// result: (XORconst [1] x)
return true
}
}
-func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpOffPtr(v *Value) bool {
// match: (OffPtr [off] ptr:(SP))
// cond:
// result: (MOVWaddr [off] ptr)
return true
}
}
-func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpOr16(v *Value) bool {
// match: (Or16 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpOr32(v *Value) bool {
// match: (Or32 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpOr8(v *Value) bool {
// match: (Or8 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpOrB(v *Value) bool {
// match: (OrB x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueARM_OpRound32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpRound32F(v *Value) bool {
// match: (Round32F x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM_OpRound64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpRound64F(v *Value) bool {
// match: (Round64F x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux16 x y)
// cond:
// result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
v.reset(OpARMCMOVWHSconst)
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
v3.AuxInt = 256
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux32 x y)
// cond:
// result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
v.reset(OpARMCMOVWHSconst)
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
return true
}
}
-func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux64 x (Const64 [c]))
// cond: uint64(c) < 16
- // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ // result: (SRLconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpARMSRLconst)
v.AuxInt = c + 16
- v0 := b.NewValue0(v.Pos, OpARMSLLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, fe.TypeUInt32())
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
}
return false
}
-func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux8 x y)
// cond:
// result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x16 x y)
// cond:
// result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
v2.AuxInt = 256
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
return true
}
}
-func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x32 x y)
// cond:
// result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint64(c) < 16
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ // result: (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpARMSRAconst)
v.AuxInt = c + 16
- v0 := b.NewValue0(v.Pos, OpARMSLLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, fe.TypeUInt32())
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
}
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint64(c) >= 16
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
+ // result: (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [31])
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpARMSRAconst)
v.AuxInt = 31
- v0 := b.NewValue0(v.Pos, OpARMSLLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, fe.TypeUInt32())
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
}
return false
}
-func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x8 x y)
// cond:
// result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux16 x y)
// cond:
// result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
v2.AuxInt = 256
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
return true
}
}
-func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux32 x y)
return true
}
}
-func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpRsh32Ux64(v *Value) bool {
// match: (Rsh32Ux64 x (Const64 [c]))
// cond: uint64(c) < 32
// result: (SRLconst x [c])
}
return false
}
-func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux8 x y)
// cond:
// result: (SRL x (ZeroExt8to32 y))
y := v.Args[1]
v.reset(OpARMSRL)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x16 x y)
// cond:
// result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARMSRAcond)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(y)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
v1.AuxInt = 256
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x32 x y)
return true
}
}
-func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpRsh32x64(v *Value) bool {
// match: (Rsh32x64 x (Const64 [c]))
// cond: uint64(c) < 32
// result: (SRAconst x [c])
}
return false
}
-func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x8 x y)
// cond:
// result: (SRA x (ZeroExt8to32 y))
y := v.Args[1]
v.reset(OpARMSRA)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux16 x y)
// cond:
// result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
v.reset(OpARMCMOVWHSconst)
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
v3.AuxInt = 256
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux32 x y)
// cond:
// result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
v.reset(OpARMCMOVWHSconst)
v.AuxInt = 0
v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
return true
}
}
-func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux64 x (Const64 [c]))
// cond: uint64(c) < 8
- // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ // result: (SRLconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpARMSRLconst)
v.AuxInt = c + 24
- v0 := b.NewValue0(v.Pos, OpARMSLLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, fe.TypeUInt32())
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
}
return false
}
-func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux8 x y)
// cond:
// result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRL)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x16 x y)
// cond:
// result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpARMCMPconst, TypeFlags)
v2.AuxInt = 256
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v3.AddArg(y)
v2.AddArg(v3)
v.AddArg(v2)
return true
}
}
-func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x32 x y)
// cond:
// result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRAcond)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
return true
}
}
-func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint64(c) < 8
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ // result: (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpARMSRAconst)
v.AuxInt = c + 24
- v0 := b.NewValue0(v.Pos, OpARMSLLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, fe.TypeUInt32())
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
}
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint64(c) >= 8
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
+ // result: (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [31])
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpARMSRAconst)
v.AuxInt = 31
- v0 := b.NewValue0(v.Pos, OpARMSLLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, fe.TypeUInt32())
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
}
return false
}
-func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x8 x y)
// cond:
// result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARMSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM_OpSelect0(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSelect0(v *Value) bool {
// match: (Select0 (CALLudiv x (MOVWconst [1])))
// cond:
// result: x
}
return false
}
-func rewriteValueARM_OpSelect1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSelect1(v *Value) bool {
// match: (Select1 (CALLudiv _ (MOVWconst [1])))
// cond:
// result: (MOVWconst [0])
}
return false
}
-func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSignExt16to32(v *Value) bool {
// match: (SignExt16to32 x)
// cond:
// result: (MOVHreg x)
return true
}
}
-func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSignExt8to16(v *Value) bool {
// match: (SignExt8to16 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSignExt8to32(v *Value) bool {
// match: (SignExt8to32 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSignmask(v *Value) bool {
// match: (Signmask x)
// cond:
// result: (SRAconst x [31])
return true
}
}
-func rewriteValueARM_OpSlicemask(v *Value, config *Config) bool {
+func rewriteValueARM_OpSlicemask(v *Value) bool {
b := v.Block
_ = b
// match: (Slicemask <t> x)
return true
}
}
-func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSqrt(v *Value) bool {
// match: (Sqrt x)
// cond:
// result: (SQRTD x)
return true
}
}
-func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpStaticCall(v *Value) bool {
// match: (StaticCall [argwid] {target} mem)
// cond:
// result: (CALLstatic [argwid] {target} mem)
return true
}
}
-func rewriteValueARM_OpStore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpStore(v *Value) bool {
// match: (Store {t} ptr val mem)
// cond: t.(Type).Size() == 1
// result: (MOVBstore ptr val mem)
}
return false
}
-func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSub16(v *Value) bool {
// match: (Sub16 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSub32(v *Value) bool {
// match: (Sub32 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSub32F(v *Value) bool {
// match: (Sub32F x y)
// cond:
// result: (SUBF x y)
return true
}
}
-func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSub32carry(v *Value) bool {
// match: (Sub32carry x y)
// cond:
// result: (SUBS x y)
return true
}
}
-func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSub32withcarry(v *Value) bool {
// match: (Sub32withcarry x y c)
// cond:
// result: (SBC x y c)
return true
}
}
-func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSub64F(v *Value) bool {
// match: (Sub64F x y)
// cond:
// result: (SUBD x y)
return true
}
}
-func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSub8(v *Value) bool {
// match: (Sub8 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpSubPtr(v *Value) bool {
// match: (SubPtr x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpTrunc16to8(v *Value) bool {
// match: (Trunc16to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpTrunc32to16(v *Value) bool {
// match: (Trunc32to16 x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpTrunc32to8(v *Value) bool {
// match: (Trunc32to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM_OpXor16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpXor16(v *Value) bool {
// match: (Xor16 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueARM_OpXor32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpXor32(v *Value) bool {
// match: (Xor32 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueARM_OpXor8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpXor8(v *Value) bool {
// match: (Xor8 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueARM_OpZero(v *Value, config *Config) bool {
+func rewriteValueARM_OpZero(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Zero [0] _ mem)
// cond:
// result: mem
mem := v.Args[1]
v.reset(OpARMMOVBstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
}
v.reset(OpARMMOVHstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
v.reset(OpARMMOVBstore)
v.AuxInt = 1
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
}
v.reset(OpARMMOVWstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
v.reset(OpARMMOVHstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARMMOVHstore, TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
v.reset(OpARMMOVBstore)
v.AuxInt = 3
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
v1.AuxInt = 2
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
v3.AuxInt = 1
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v4.AuxInt = 0
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
v5.AuxInt = 0
v5.AddArg(ptr)
- v6 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v6.AuxInt = 0
v5.AddArg(v6)
v5.AddArg(mem)
v.reset(OpARMMOVBstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
v1.AuxInt = 1
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARMMOVBstore, TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
v.reset(OpARMDUFFZERO)
v.AuxInt = 4 * (128 - int64(s/4))
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
v0.AddArg(ptr)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARMMOVWconst, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpARMMOVWconst, fe.TypeUInt32())
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(mem)
}
return false
}
-func rewriteValueARM_OpZeroExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpZeroExt16to32(v *Value) bool {
// match: (ZeroExt16to32 x)
// cond:
// result: (MOVHUreg x)
return true
}
}
-func rewriteValueARM_OpZeroExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpZeroExt8to16(v *Value) bool {
// match: (ZeroExt8to16 x)
// cond:
// result: (MOVBUreg x)
return true
}
}
-func rewriteValueARM_OpZeroExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM_OpZeroExt8to32(v *Value) bool {
// match: (ZeroExt8to32 x)
// cond:
// result: (MOVBUreg x)
return true
}
}
-func rewriteValueARM_OpZeromask(v *Value, config *Config) bool {
+func rewriteValueARM_OpZeromask(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Zeromask x)
// cond:
- // result: (SRAconst (RSBshiftRL <config.fe.TypeInt32()> x x [1]) [31])
+ // result: (SRAconst (RSBshiftRL <fe.TypeInt32()> x x [1]) [31])
for {
x := v.Args[0]
v.reset(OpARMSRAconst)
v.AuxInt = 31
- v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, fe.TypeInt32())
v0.AuxInt = 1
v0.AddArg(x)
v0.AddArg(x)
return true
}
}
-func rewriteBlockARM(b *Block, config *Config) bool {
+func rewriteBlockARM(b *Block) bool {
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
switch b.Kind {
case BlockARMEQ:
// match: (EQ (FlagEQ) yes no)
import "math"
var _ = math.MinInt8 // in case not otherwise used
-func rewriteValueARM64(v *Value, config *Config) bool {
+func rewriteValueARM64(v *Value) bool {
switch v.Op {
case OpARM64ADD:
- return rewriteValueARM64_OpARM64ADD(v, config)
+ return rewriteValueARM64_OpARM64ADD(v)
case OpARM64ADDconst:
- return rewriteValueARM64_OpARM64ADDconst(v, config)
+ return rewriteValueARM64_OpARM64ADDconst(v)
case OpARM64ADDshiftLL:
- return rewriteValueARM64_OpARM64ADDshiftLL(v, config)
+ return rewriteValueARM64_OpARM64ADDshiftLL(v)
case OpARM64ADDshiftRA:
- return rewriteValueARM64_OpARM64ADDshiftRA(v, config)
+ return rewriteValueARM64_OpARM64ADDshiftRA(v)
case OpARM64ADDshiftRL:
- return rewriteValueARM64_OpARM64ADDshiftRL(v, config)
+ return rewriteValueARM64_OpARM64ADDshiftRL(v)
case OpARM64AND:
- return rewriteValueARM64_OpARM64AND(v, config)
+ return rewriteValueARM64_OpARM64AND(v)
case OpARM64ANDconst:
- return rewriteValueARM64_OpARM64ANDconst(v, config)
+ return rewriteValueARM64_OpARM64ANDconst(v)
case OpARM64ANDshiftLL:
- return rewriteValueARM64_OpARM64ANDshiftLL(v, config)
+ return rewriteValueARM64_OpARM64ANDshiftLL(v)
case OpARM64ANDshiftRA:
- return rewriteValueARM64_OpARM64ANDshiftRA(v, config)
+ return rewriteValueARM64_OpARM64ANDshiftRA(v)
case OpARM64ANDshiftRL:
- return rewriteValueARM64_OpARM64ANDshiftRL(v, config)
+ return rewriteValueARM64_OpARM64ANDshiftRL(v)
case OpARM64BIC:
- return rewriteValueARM64_OpARM64BIC(v, config)
+ return rewriteValueARM64_OpARM64BIC(v)
case OpARM64BICconst:
- return rewriteValueARM64_OpARM64BICconst(v, config)
+ return rewriteValueARM64_OpARM64BICconst(v)
case OpARM64BICshiftLL:
- return rewriteValueARM64_OpARM64BICshiftLL(v, config)
+ return rewriteValueARM64_OpARM64BICshiftLL(v)
case OpARM64BICshiftRA:
- return rewriteValueARM64_OpARM64BICshiftRA(v, config)
+ return rewriteValueARM64_OpARM64BICshiftRA(v)
case OpARM64BICshiftRL:
- return rewriteValueARM64_OpARM64BICshiftRL(v, config)
+ return rewriteValueARM64_OpARM64BICshiftRL(v)
case OpARM64CMP:
- return rewriteValueARM64_OpARM64CMP(v, config)
+ return rewriteValueARM64_OpARM64CMP(v)
case OpARM64CMPW:
- return rewriteValueARM64_OpARM64CMPW(v, config)
+ return rewriteValueARM64_OpARM64CMPW(v)
case OpARM64CMPWconst:
- return rewriteValueARM64_OpARM64CMPWconst(v, config)
+ return rewriteValueARM64_OpARM64CMPWconst(v)
case OpARM64CMPconst:
- return rewriteValueARM64_OpARM64CMPconst(v, config)
+ return rewriteValueARM64_OpARM64CMPconst(v)
case OpARM64CMPshiftLL:
- return rewriteValueARM64_OpARM64CMPshiftLL(v, config)
+ return rewriteValueARM64_OpARM64CMPshiftLL(v)
case OpARM64CMPshiftRA:
- return rewriteValueARM64_OpARM64CMPshiftRA(v, config)
+ return rewriteValueARM64_OpARM64CMPshiftRA(v)
case OpARM64CMPshiftRL:
- return rewriteValueARM64_OpARM64CMPshiftRL(v, config)
+ return rewriteValueARM64_OpARM64CMPshiftRL(v)
case OpARM64CSELULT:
- return rewriteValueARM64_OpARM64CSELULT(v, config)
+ return rewriteValueARM64_OpARM64CSELULT(v)
case OpARM64CSELULT0:
- return rewriteValueARM64_OpARM64CSELULT0(v, config)
+ return rewriteValueARM64_OpARM64CSELULT0(v)
case OpARM64DIV:
- return rewriteValueARM64_OpARM64DIV(v, config)
+ return rewriteValueARM64_OpARM64DIV(v)
case OpARM64DIVW:
- return rewriteValueARM64_OpARM64DIVW(v, config)
+ return rewriteValueARM64_OpARM64DIVW(v)
case OpARM64Equal:
- return rewriteValueARM64_OpARM64Equal(v, config)
+ return rewriteValueARM64_OpARM64Equal(v)
case OpARM64FMOVDload:
- return rewriteValueARM64_OpARM64FMOVDload(v, config)
+ return rewriteValueARM64_OpARM64FMOVDload(v)
case OpARM64FMOVDstore:
- return rewriteValueARM64_OpARM64FMOVDstore(v, config)
+ return rewriteValueARM64_OpARM64FMOVDstore(v)
case OpARM64FMOVSload:
- return rewriteValueARM64_OpARM64FMOVSload(v, config)
+ return rewriteValueARM64_OpARM64FMOVSload(v)
case OpARM64FMOVSstore:
- return rewriteValueARM64_OpARM64FMOVSstore(v, config)
+ return rewriteValueARM64_OpARM64FMOVSstore(v)
case OpARM64GreaterEqual:
- return rewriteValueARM64_OpARM64GreaterEqual(v, config)
+ return rewriteValueARM64_OpARM64GreaterEqual(v)
case OpARM64GreaterEqualU:
- return rewriteValueARM64_OpARM64GreaterEqualU(v, config)
+ return rewriteValueARM64_OpARM64GreaterEqualU(v)
case OpARM64GreaterThan:
- return rewriteValueARM64_OpARM64GreaterThan(v, config)
+ return rewriteValueARM64_OpARM64GreaterThan(v)
case OpARM64GreaterThanU:
- return rewriteValueARM64_OpARM64GreaterThanU(v, config)
+ return rewriteValueARM64_OpARM64GreaterThanU(v)
case OpARM64LessEqual:
- return rewriteValueARM64_OpARM64LessEqual(v, config)
+ return rewriteValueARM64_OpARM64LessEqual(v)
case OpARM64LessEqualU:
- return rewriteValueARM64_OpARM64LessEqualU(v, config)
+ return rewriteValueARM64_OpARM64LessEqualU(v)
case OpARM64LessThan:
- return rewriteValueARM64_OpARM64LessThan(v, config)
+ return rewriteValueARM64_OpARM64LessThan(v)
case OpARM64LessThanU:
- return rewriteValueARM64_OpARM64LessThanU(v, config)
+ return rewriteValueARM64_OpARM64LessThanU(v)
case OpARM64MOD:
- return rewriteValueARM64_OpARM64MOD(v, config)
+ return rewriteValueARM64_OpARM64MOD(v)
case OpARM64MODW:
- return rewriteValueARM64_OpARM64MODW(v, config)
+ return rewriteValueARM64_OpARM64MODW(v)
case OpARM64MOVBUload:
- return rewriteValueARM64_OpARM64MOVBUload(v, config)
+ return rewriteValueARM64_OpARM64MOVBUload(v)
case OpARM64MOVBUreg:
- return rewriteValueARM64_OpARM64MOVBUreg(v, config)
+ return rewriteValueARM64_OpARM64MOVBUreg(v)
case OpARM64MOVBload:
- return rewriteValueARM64_OpARM64MOVBload(v, config)
+ return rewriteValueARM64_OpARM64MOVBload(v)
case OpARM64MOVBreg:
- return rewriteValueARM64_OpARM64MOVBreg(v, config)
+ return rewriteValueARM64_OpARM64MOVBreg(v)
case OpARM64MOVBstore:
- return rewriteValueARM64_OpARM64MOVBstore(v, config)
+ return rewriteValueARM64_OpARM64MOVBstore(v)
case OpARM64MOVBstorezero:
- return rewriteValueARM64_OpARM64MOVBstorezero(v, config)
+ return rewriteValueARM64_OpARM64MOVBstorezero(v)
case OpARM64MOVDload:
- return rewriteValueARM64_OpARM64MOVDload(v, config)
+ return rewriteValueARM64_OpARM64MOVDload(v)
case OpARM64MOVDreg:
- return rewriteValueARM64_OpARM64MOVDreg(v, config)
+ return rewriteValueARM64_OpARM64MOVDreg(v)
case OpARM64MOVDstore:
- return rewriteValueARM64_OpARM64MOVDstore(v, config)
+ return rewriteValueARM64_OpARM64MOVDstore(v)
case OpARM64MOVDstorezero:
- return rewriteValueARM64_OpARM64MOVDstorezero(v, config)
+ return rewriteValueARM64_OpARM64MOVDstorezero(v)
case OpARM64MOVHUload:
- return rewriteValueARM64_OpARM64MOVHUload(v, config)
+ return rewriteValueARM64_OpARM64MOVHUload(v)
case OpARM64MOVHUreg:
- return rewriteValueARM64_OpARM64MOVHUreg(v, config)
+ return rewriteValueARM64_OpARM64MOVHUreg(v)
case OpARM64MOVHload:
- return rewriteValueARM64_OpARM64MOVHload(v, config)
+ return rewriteValueARM64_OpARM64MOVHload(v)
case OpARM64MOVHreg:
- return rewriteValueARM64_OpARM64MOVHreg(v, config)
+ return rewriteValueARM64_OpARM64MOVHreg(v)
case OpARM64MOVHstore:
- return rewriteValueARM64_OpARM64MOVHstore(v, config)
+ return rewriteValueARM64_OpARM64MOVHstore(v)
case OpARM64MOVHstorezero:
- return rewriteValueARM64_OpARM64MOVHstorezero(v, config)
+ return rewriteValueARM64_OpARM64MOVHstorezero(v)
case OpARM64MOVWUload:
- return rewriteValueARM64_OpARM64MOVWUload(v, config)
+ return rewriteValueARM64_OpARM64MOVWUload(v)
case OpARM64MOVWUreg:
- return rewriteValueARM64_OpARM64MOVWUreg(v, config)
+ return rewriteValueARM64_OpARM64MOVWUreg(v)
case OpARM64MOVWload:
- return rewriteValueARM64_OpARM64MOVWload(v, config)
+ return rewriteValueARM64_OpARM64MOVWload(v)
case OpARM64MOVWreg:
- return rewriteValueARM64_OpARM64MOVWreg(v, config)
+ return rewriteValueARM64_OpARM64MOVWreg(v)
case OpARM64MOVWstore:
- return rewriteValueARM64_OpARM64MOVWstore(v, config)
+ return rewriteValueARM64_OpARM64MOVWstore(v)
case OpARM64MOVWstorezero:
- return rewriteValueARM64_OpARM64MOVWstorezero(v, config)
+ return rewriteValueARM64_OpARM64MOVWstorezero(v)
case OpARM64MUL:
- return rewriteValueARM64_OpARM64MUL(v, config)
+ return rewriteValueARM64_OpARM64MUL(v)
case OpARM64MULW:
- return rewriteValueARM64_OpARM64MULW(v, config)
+ return rewriteValueARM64_OpARM64MULW(v)
case OpARM64MVN:
- return rewriteValueARM64_OpARM64MVN(v, config)
+ return rewriteValueARM64_OpARM64MVN(v)
case OpARM64NEG:
- return rewriteValueARM64_OpARM64NEG(v, config)
+ return rewriteValueARM64_OpARM64NEG(v)
case OpARM64NotEqual:
- return rewriteValueARM64_OpARM64NotEqual(v, config)
+ return rewriteValueARM64_OpARM64NotEqual(v)
case OpARM64OR:
- return rewriteValueARM64_OpARM64OR(v, config)
+ return rewriteValueARM64_OpARM64OR(v)
case OpARM64ORconst:
- return rewriteValueARM64_OpARM64ORconst(v, config)
+ return rewriteValueARM64_OpARM64ORconst(v)
case OpARM64ORshiftLL:
- return rewriteValueARM64_OpARM64ORshiftLL(v, config)
+ return rewriteValueARM64_OpARM64ORshiftLL(v)
case OpARM64ORshiftRA:
- return rewriteValueARM64_OpARM64ORshiftRA(v, config)
+ return rewriteValueARM64_OpARM64ORshiftRA(v)
case OpARM64ORshiftRL:
- return rewriteValueARM64_OpARM64ORshiftRL(v, config)
+ return rewriteValueARM64_OpARM64ORshiftRL(v)
case OpARM64SLL:
- return rewriteValueARM64_OpARM64SLL(v, config)
+ return rewriteValueARM64_OpARM64SLL(v)
case OpARM64SLLconst:
- return rewriteValueARM64_OpARM64SLLconst(v, config)
+ return rewriteValueARM64_OpARM64SLLconst(v)
case OpARM64SRA:
- return rewriteValueARM64_OpARM64SRA(v, config)
+ return rewriteValueARM64_OpARM64SRA(v)
case OpARM64SRAconst:
- return rewriteValueARM64_OpARM64SRAconst(v, config)
+ return rewriteValueARM64_OpARM64SRAconst(v)
case OpARM64SRL:
- return rewriteValueARM64_OpARM64SRL(v, config)
+ return rewriteValueARM64_OpARM64SRL(v)
case OpARM64SRLconst:
- return rewriteValueARM64_OpARM64SRLconst(v, config)
+ return rewriteValueARM64_OpARM64SRLconst(v)
case OpARM64SUB:
- return rewriteValueARM64_OpARM64SUB(v, config)
+ return rewriteValueARM64_OpARM64SUB(v)
case OpARM64SUBconst:
- return rewriteValueARM64_OpARM64SUBconst(v, config)
+ return rewriteValueARM64_OpARM64SUBconst(v)
case OpARM64SUBshiftLL:
- return rewriteValueARM64_OpARM64SUBshiftLL(v, config)
+ return rewriteValueARM64_OpARM64SUBshiftLL(v)
case OpARM64SUBshiftRA:
- return rewriteValueARM64_OpARM64SUBshiftRA(v, config)
+ return rewriteValueARM64_OpARM64SUBshiftRA(v)
case OpARM64SUBshiftRL:
- return rewriteValueARM64_OpARM64SUBshiftRL(v, config)
+ return rewriteValueARM64_OpARM64SUBshiftRL(v)
case OpARM64UDIV:
- return rewriteValueARM64_OpARM64UDIV(v, config)
+ return rewriteValueARM64_OpARM64UDIV(v)
case OpARM64UDIVW:
- return rewriteValueARM64_OpARM64UDIVW(v, config)
+ return rewriteValueARM64_OpARM64UDIVW(v)
case OpARM64UMOD:
- return rewriteValueARM64_OpARM64UMOD(v, config)
+ return rewriteValueARM64_OpARM64UMOD(v)
case OpARM64UMODW:
- return rewriteValueARM64_OpARM64UMODW(v, config)
+ return rewriteValueARM64_OpARM64UMODW(v)
case OpARM64XOR:
- return rewriteValueARM64_OpARM64XOR(v, config)
+ return rewriteValueARM64_OpARM64XOR(v)
case OpARM64XORconst:
- return rewriteValueARM64_OpARM64XORconst(v, config)
+ return rewriteValueARM64_OpARM64XORconst(v)
case OpARM64XORshiftLL:
- return rewriteValueARM64_OpARM64XORshiftLL(v, config)
+ return rewriteValueARM64_OpARM64XORshiftLL(v)
case OpARM64XORshiftRA:
- return rewriteValueARM64_OpARM64XORshiftRA(v, config)
+ return rewriteValueARM64_OpARM64XORshiftRA(v)
case OpARM64XORshiftRL:
- return rewriteValueARM64_OpARM64XORshiftRL(v, config)
+ return rewriteValueARM64_OpARM64XORshiftRL(v)
case OpAdd16:
- return rewriteValueARM64_OpAdd16(v, config)
+ return rewriteValueARM64_OpAdd16(v)
case OpAdd32:
- return rewriteValueARM64_OpAdd32(v, config)
+ return rewriteValueARM64_OpAdd32(v)
case OpAdd32F:
- return rewriteValueARM64_OpAdd32F(v, config)
+ return rewriteValueARM64_OpAdd32F(v)
case OpAdd64:
- return rewriteValueARM64_OpAdd64(v, config)
+ return rewriteValueARM64_OpAdd64(v)
case OpAdd64F:
- return rewriteValueARM64_OpAdd64F(v, config)
+ return rewriteValueARM64_OpAdd64F(v)
case OpAdd8:
- return rewriteValueARM64_OpAdd8(v, config)
+ return rewriteValueARM64_OpAdd8(v)
case OpAddPtr:
- return rewriteValueARM64_OpAddPtr(v, config)
+ return rewriteValueARM64_OpAddPtr(v)
case OpAddr:
- return rewriteValueARM64_OpAddr(v, config)
+ return rewriteValueARM64_OpAddr(v)
case OpAnd16:
- return rewriteValueARM64_OpAnd16(v, config)
+ return rewriteValueARM64_OpAnd16(v)
case OpAnd32:
- return rewriteValueARM64_OpAnd32(v, config)
+ return rewriteValueARM64_OpAnd32(v)
case OpAnd64:
- return rewriteValueARM64_OpAnd64(v, config)
+ return rewriteValueARM64_OpAnd64(v)
case OpAnd8:
- return rewriteValueARM64_OpAnd8(v, config)
+ return rewriteValueARM64_OpAnd8(v)
case OpAndB:
- return rewriteValueARM64_OpAndB(v, config)
+ return rewriteValueARM64_OpAndB(v)
case OpAtomicAdd32:
- return rewriteValueARM64_OpAtomicAdd32(v, config)
+ return rewriteValueARM64_OpAtomicAdd32(v)
case OpAtomicAdd64:
- return rewriteValueARM64_OpAtomicAdd64(v, config)
+ return rewriteValueARM64_OpAtomicAdd64(v)
case OpAtomicAnd8:
- return rewriteValueARM64_OpAtomicAnd8(v, config)
+ return rewriteValueARM64_OpAtomicAnd8(v)
case OpAtomicCompareAndSwap32:
- return rewriteValueARM64_OpAtomicCompareAndSwap32(v, config)
+ return rewriteValueARM64_OpAtomicCompareAndSwap32(v)
case OpAtomicCompareAndSwap64:
- return rewriteValueARM64_OpAtomicCompareAndSwap64(v, config)
+ return rewriteValueARM64_OpAtomicCompareAndSwap64(v)
case OpAtomicExchange32:
- return rewriteValueARM64_OpAtomicExchange32(v, config)
+ return rewriteValueARM64_OpAtomicExchange32(v)
case OpAtomicExchange64:
- return rewriteValueARM64_OpAtomicExchange64(v, config)
+ return rewriteValueARM64_OpAtomicExchange64(v)
case OpAtomicLoad32:
- return rewriteValueARM64_OpAtomicLoad32(v, config)
+ return rewriteValueARM64_OpAtomicLoad32(v)
case OpAtomicLoad64:
- return rewriteValueARM64_OpAtomicLoad64(v, config)
+ return rewriteValueARM64_OpAtomicLoad64(v)
case OpAtomicLoadPtr:
- return rewriteValueARM64_OpAtomicLoadPtr(v, config)
+ return rewriteValueARM64_OpAtomicLoadPtr(v)
case OpAtomicOr8:
- return rewriteValueARM64_OpAtomicOr8(v, config)
+ return rewriteValueARM64_OpAtomicOr8(v)
case OpAtomicStore32:
- return rewriteValueARM64_OpAtomicStore32(v, config)
+ return rewriteValueARM64_OpAtomicStore32(v)
case OpAtomicStore64:
- return rewriteValueARM64_OpAtomicStore64(v, config)
+ return rewriteValueARM64_OpAtomicStore64(v)
case OpAtomicStorePtrNoWB:
- return rewriteValueARM64_OpAtomicStorePtrNoWB(v, config)
+ return rewriteValueARM64_OpAtomicStorePtrNoWB(v)
case OpAvg64u:
- return rewriteValueARM64_OpAvg64u(v, config)
+ return rewriteValueARM64_OpAvg64u(v)
case OpBitLen64:
- return rewriteValueARM64_OpBitLen64(v, config)
+ return rewriteValueARM64_OpBitLen64(v)
case OpBitRev16:
- return rewriteValueARM64_OpBitRev16(v, config)
+ return rewriteValueARM64_OpBitRev16(v)
case OpBitRev32:
- return rewriteValueARM64_OpBitRev32(v, config)
+ return rewriteValueARM64_OpBitRev32(v)
case OpBitRev64:
- return rewriteValueARM64_OpBitRev64(v, config)
+ return rewriteValueARM64_OpBitRev64(v)
case OpBitRev8:
- return rewriteValueARM64_OpBitRev8(v, config)
+ return rewriteValueARM64_OpBitRev8(v)
case OpBswap32:
- return rewriteValueARM64_OpBswap32(v, config)
+ return rewriteValueARM64_OpBswap32(v)
case OpBswap64:
- return rewriteValueARM64_OpBswap64(v, config)
+ return rewriteValueARM64_OpBswap64(v)
case OpClosureCall:
- return rewriteValueARM64_OpClosureCall(v, config)
+ return rewriteValueARM64_OpClosureCall(v)
case OpCom16:
- return rewriteValueARM64_OpCom16(v, config)
+ return rewriteValueARM64_OpCom16(v)
case OpCom32:
- return rewriteValueARM64_OpCom32(v, config)
+ return rewriteValueARM64_OpCom32(v)
case OpCom64:
- return rewriteValueARM64_OpCom64(v, config)
+ return rewriteValueARM64_OpCom64(v)
case OpCom8:
- return rewriteValueARM64_OpCom8(v, config)
+ return rewriteValueARM64_OpCom8(v)
case OpConst16:
- return rewriteValueARM64_OpConst16(v, config)
+ return rewriteValueARM64_OpConst16(v)
case OpConst32:
- return rewriteValueARM64_OpConst32(v, config)
+ return rewriteValueARM64_OpConst32(v)
case OpConst32F:
- return rewriteValueARM64_OpConst32F(v, config)
+ return rewriteValueARM64_OpConst32F(v)
case OpConst64:
- return rewriteValueARM64_OpConst64(v, config)
+ return rewriteValueARM64_OpConst64(v)
case OpConst64F:
- return rewriteValueARM64_OpConst64F(v, config)
+ return rewriteValueARM64_OpConst64F(v)
case OpConst8:
- return rewriteValueARM64_OpConst8(v, config)
+ return rewriteValueARM64_OpConst8(v)
case OpConstBool:
- return rewriteValueARM64_OpConstBool(v, config)
+ return rewriteValueARM64_OpConstBool(v)
case OpConstNil:
- return rewriteValueARM64_OpConstNil(v, config)
+ return rewriteValueARM64_OpConstNil(v)
case OpConvert:
- return rewriteValueARM64_OpConvert(v, config)
+ return rewriteValueARM64_OpConvert(v)
case OpCtz32:
- return rewriteValueARM64_OpCtz32(v, config)
+ return rewriteValueARM64_OpCtz32(v)
case OpCtz64:
- return rewriteValueARM64_OpCtz64(v, config)
+ return rewriteValueARM64_OpCtz64(v)
case OpCvt32Fto32:
- return rewriteValueARM64_OpCvt32Fto32(v, config)
+ return rewriteValueARM64_OpCvt32Fto32(v)
case OpCvt32Fto32U:
- return rewriteValueARM64_OpCvt32Fto32U(v, config)
+ return rewriteValueARM64_OpCvt32Fto32U(v)
case OpCvt32Fto64:
- return rewriteValueARM64_OpCvt32Fto64(v, config)
+ return rewriteValueARM64_OpCvt32Fto64(v)
case OpCvt32Fto64F:
- return rewriteValueARM64_OpCvt32Fto64F(v, config)
+ return rewriteValueARM64_OpCvt32Fto64F(v)
case OpCvt32Fto64U:
- return rewriteValueARM64_OpCvt32Fto64U(v, config)
+ return rewriteValueARM64_OpCvt32Fto64U(v)
case OpCvt32Uto32F:
- return rewriteValueARM64_OpCvt32Uto32F(v, config)
+ return rewriteValueARM64_OpCvt32Uto32F(v)
case OpCvt32Uto64F:
- return rewriteValueARM64_OpCvt32Uto64F(v, config)
+ return rewriteValueARM64_OpCvt32Uto64F(v)
case OpCvt32to32F:
- return rewriteValueARM64_OpCvt32to32F(v, config)
+ return rewriteValueARM64_OpCvt32to32F(v)
case OpCvt32to64F:
- return rewriteValueARM64_OpCvt32to64F(v, config)
+ return rewriteValueARM64_OpCvt32to64F(v)
case OpCvt64Fto32:
- return rewriteValueARM64_OpCvt64Fto32(v, config)
+ return rewriteValueARM64_OpCvt64Fto32(v)
case OpCvt64Fto32F:
- return rewriteValueARM64_OpCvt64Fto32F(v, config)
+ return rewriteValueARM64_OpCvt64Fto32F(v)
case OpCvt64Fto32U:
- return rewriteValueARM64_OpCvt64Fto32U(v, config)
+ return rewriteValueARM64_OpCvt64Fto32U(v)
case OpCvt64Fto64:
- return rewriteValueARM64_OpCvt64Fto64(v, config)
+ return rewriteValueARM64_OpCvt64Fto64(v)
case OpCvt64Fto64U:
- return rewriteValueARM64_OpCvt64Fto64U(v, config)
+ return rewriteValueARM64_OpCvt64Fto64U(v)
case OpCvt64Uto32F:
- return rewriteValueARM64_OpCvt64Uto32F(v, config)
+ return rewriteValueARM64_OpCvt64Uto32F(v)
case OpCvt64Uto64F:
- return rewriteValueARM64_OpCvt64Uto64F(v, config)
+ return rewriteValueARM64_OpCvt64Uto64F(v)
case OpCvt64to32F:
- return rewriteValueARM64_OpCvt64to32F(v, config)
+ return rewriteValueARM64_OpCvt64to32F(v)
case OpCvt64to64F:
- return rewriteValueARM64_OpCvt64to64F(v, config)
+ return rewriteValueARM64_OpCvt64to64F(v)
case OpDiv16:
- return rewriteValueARM64_OpDiv16(v, config)
+ return rewriteValueARM64_OpDiv16(v)
case OpDiv16u:
- return rewriteValueARM64_OpDiv16u(v, config)
+ return rewriteValueARM64_OpDiv16u(v)
case OpDiv32:
- return rewriteValueARM64_OpDiv32(v, config)
+ return rewriteValueARM64_OpDiv32(v)
case OpDiv32F:
- return rewriteValueARM64_OpDiv32F(v, config)
+ return rewriteValueARM64_OpDiv32F(v)
case OpDiv32u:
- return rewriteValueARM64_OpDiv32u(v, config)
+ return rewriteValueARM64_OpDiv32u(v)
case OpDiv64:
- return rewriteValueARM64_OpDiv64(v, config)
+ return rewriteValueARM64_OpDiv64(v)
case OpDiv64F:
- return rewriteValueARM64_OpDiv64F(v, config)
+ return rewriteValueARM64_OpDiv64F(v)
case OpDiv64u:
- return rewriteValueARM64_OpDiv64u(v, config)
+ return rewriteValueARM64_OpDiv64u(v)
case OpDiv8:
- return rewriteValueARM64_OpDiv8(v, config)
+ return rewriteValueARM64_OpDiv8(v)
case OpDiv8u:
- return rewriteValueARM64_OpDiv8u(v, config)
+ return rewriteValueARM64_OpDiv8u(v)
case OpEq16:
- return rewriteValueARM64_OpEq16(v, config)
+ return rewriteValueARM64_OpEq16(v)
case OpEq32:
- return rewriteValueARM64_OpEq32(v, config)
+ return rewriteValueARM64_OpEq32(v)
case OpEq32F:
- return rewriteValueARM64_OpEq32F(v, config)
+ return rewriteValueARM64_OpEq32F(v)
case OpEq64:
- return rewriteValueARM64_OpEq64(v, config)
+ return rewriteValueARM64_OpEq64(v)
case OpEq64F:
- return rewriteValueARM64_OpEq64F(v, config)
+ return rewriteValueARM64_OpEq64F(v)
case OpEq8:
- return rewriteValueARM64_OpEq8(v, config)
+ return rewriteValueARM64_OpEq8(v)
case OpEqB:
- return rewriteValueARM64_OpEqB(v, config)
+ return rewriteValueARM64_OpEqB(v)
case OpEqPtr:
- return rewriteValueARM64_OpEqPtr(v, config)
+ return rewriteValueARM64_OpEqPtr(v)
case OpGeq16:
- return rewriteValueARM64_OpGeq16(v, config)
+ return rewriteValueARM64_OpGeq16(v)
case OpGeq16U:
- return rewriteValueARM64_OpGeq16U(v, config)
+ return rewriteValueARM64_OpGeq16U(v)
case OpGeq32:
- return rewriteValueARM64_OpGeq32(v, config)
+ return rewriteValueARM64_OpGeq32(v)
case OpGeq32F:
- return rewriteValueARM64_OpGeq32F(v, config)
+ return rewriteValueARM64_OpGeq32F(v)
case OpGeq32U:
- return rewriteValueARM64_OpGeq32U(v, config)
+ return rewriteValueARM64_OpGeq32U(v)
case OpGeq64:
- return rewriteValueARM64_OpGeq64(v, config)
+ return rewriteValueARM64_OpGeq64(v)
case OpGeq64F:
- return rewriteValueARM64_OpGeq64F(v, config)
+ return rewriteValueARM64_OpGeq64F(v)
case OpGeq64U:
- return rewriteValueARM64_OpGeq64U(v, config)
+ return rewriteValueARM64_OpGeq64U(v)
case OpGeq8:
- return rewriteValueARM64_OpGeq8(v, config)
+ return rewriteValueARM64_OpGeq8(v)
case OpGeq8U:
- return rewriteValueARM64_OpGeq8U(v, config)
+ return rewriteValueARM64_OpGeq8U(v)
case OpGetClosurePtr:
- return rewriteValueARM64_OpGetClosurePtr(v, config)
+ return rewriteValueARM64_OpGetClosurePtr(v)
case OpGreater16:
- return rewriteValueARM64_OpGreater16(v, config)
+ return rewriteValueARM64_OpGreater16(v)
case OpGreater16U:
- return rewriteValueARM64_OpGreater16U(v, config)
+ return rewriteValueARM64_OpGreater16U(v)
case OpGreater32:
- return rewriteValueARM64_OpGreater32(v, config)
+ return rewriteValueARM64_OpGreater32(v)
case OpGreater32F:
- return rewriteValueARM64_OpGreater32F(v, config)
+ return rewriteValueARM64_OpGreater32F(v)
case OpGreater32U:
- return rewriteValueARM64_OpGreater32U(v, config)
+ return rewriteValueARM64_OpGreater32U(v)
case OpGreater64:
- return rewriteValueARM64_OpGreater64(v, config)
+ return rewriteValueARM64_OpGreater64(v)
case OpGreater64F:
- return rewriteValueARM64_OpGreater64F(v, config)
+ return rewriteValueARM64_OpGreater64F(v)
case OpGreater64U:
- return rewriteValueARM64_OpGreater64U(v, config)
+ return rewriteValueARM64_OpGreater64U(v)
case OpGreater8:
- return rewriteValueARM64_OpGreater8(v, config)
+ return rewriteValueARM64_OpGreater8(v)
case OpGreater8U:
- return rewriteValueARM64_OpGreater8U(v, config)
+ return rewriteValueARM64_OpGreater8U(v)
case OpHmul32:
- return rewriteValueARM64_OpHmul32(v, config)
+ return rewriteValueARM64_OpHmul32(v)
case OpHmul32u:
- return rewriteValueARM64_OpHmul32u(v, config)
+ return rewriteValueARM64_OpHmul32u(v)
case OpHmul64:
- return rewriteValueARM64_OpHmul64(v, config)
+ return rewriteValueARM64_OpHmul64(v)
case OpHmul64u:
- return rewriteValueARM64_OpHmul64u(v, config)
+ return rewriteValueARM64_OpHmul64u(v)
case OpInterCall:
- return rewriteValueARM64_OpInterCall(v, config)
+ return rewriteValueARM64_OpInterCall(v)
case OpIsInBounds:
- return rewriteValueARM64_OpIsInBounds(v, config)
+ return rewriteValueARM64_OpIsInBounds(v)
case OpIsNonNil:
- return rewriteValueARM64_OpIsNonNil(v, config)
+ return rewriteValueARM64_OpIsNonNil(v)
case OpIsSliceInBounds:
- return rewriteValueARM64_OpIsSliceInBounds(v, config)
+ return rewriteValueARM64_OpIsSliceInBounds(v)
case OpLeq16:
- return rewriteValueARM64_OpLeq16(v, config)
+ return rewriteValueARM64_OpLeq16(v)
case OpLeq16U:
- return rewriteValueARM64_OpLeq16U(v, config)
+ return rewriteValueARM64_OpLeq16U(v)
case OpLeq32:
- return rewriteValueARM64_OpLeq32(v, config)
+ return rewriteValueARM64_OpLeq32(v)
case OpLeq32F:
- return rewriteValueARM64_OpLeq32F(v, config)
+ return rewriteValueARM64_OpLeq32F(v)
case OpLeq32U:
- return rewriteValueARM64_OpLeq32U(v, config)
+ return rewriteValueARM64_OpLeq32U(v)
case OpLeq64:
- return rewriteValueARM64_OpLeq64(v, config)
+ return rewriteValueARM64_OpLeq64(v)
case OpLeq64F:
- return rewriteValueARM64_OpLeq64F(v, config)
+ return rewriteValueARM64_OpLeq64F(v)
case OpLeq64U:
- return rewriteValueARM64_OpLeq64U(v, config)
+ return rewriteValueARM64_OpLeq64U(v)
case OpLeq8:
- return rewriteValueARM64_OpLeq8(v, config)
+ return rewriteValueARM64_OpLeq8(v)
case OpLeq8U:
- return rewriteValueARM64_OpLeq8U(v, config)
+ return rewriteValueARM64_OpLeq8U(v)
case OpLess16:
- return rewriteValueARM64_OpLess16(v, config)
+ return rewriteValueARM64_OpLess16(v)
case OpLess16U:
- return rewriteValueARM64_OpLess16U(v, config)
+ return rewriteValueARM64_OpLess16U(v)
case OpLess32:
- return rewriteValueARM64_OpLess32(v, config)
+ return rewriteValueARM64_OpLess32(v)
case OpLess32F:
- return rewriteValueARM64_OpLess32F(v, config)
+ return rewriteValueARM64_OpLess32F(v)
case OpLess32U:
- return rewriteValueARM64_OpLess32U(v, config)
+ return rewriteValueARM64_OpLess32U(v)
case OpLess64:
- return rewriteValueARM64_OpLess64(v, config)
+ return rewriteValueARM64_OpLess64(v)
case OpLess64F:
- return rewriteValueARM64_OpLess64F(v, config)
+ return rewriteValueARM64_OpLess64F(v)
case OpLess64U:
- return rewriteValueARM64_OpLess64U(v, config)
+ return rewriteValueARM64_OpLess64U(v)
case OpLess8:
- return rewriteValueARM64_OpLess8(v, config)
+ return rewriteValueARM64_OpLess8(v)
case OpLess8U:
- return rewriteValueARM64_OpLess8U(v, config)
+ return rewriteValueARM64_OpLess8U(v)
case OpLoad:
- return rewriteValueARM64_OpLoad(v, config)
+ return rewriteValueARM64_OpLoad(v)
case OpLsh16x16:
- return rewriteValueARM64_OpLsh16x16(v, config)
+ return rewriteValueARM64_OpLsh16x16(v)
case OpLsh16x32:
- return rewriteValueARM64_OpLsh16x32(v, config)
+ return rewriteValueARM64_OpLsh16x32(v)
case OpLsh16x64:
- return rewriteValueARM64_OpLsh16x64(v, config)
+ return rewriteValueARM64_OpLsh16x64(v)
case OpLsh16x8:
- return rewriteValueARM64_OpLsh16x8(v, config)
+ return rewriteValueARM64_OpLsh16x8(v)
case OpLsh32x16:
- return rewriteValueARM64_OpLsh32x16(v, config)
+ return rewriteValueARM64_OpLsh32x16(v)
case OpLsh32x32:
- return rewriteValueARM64_OpLsh32x32(v, config)
+ return rewriteValueARM64_OpLsh32x32(v)
case OpLsh32x64:
- return rewriteValueARM64_OpLsh32x64(v, config)
+ return rewriteValueARM64_OpLsh32x64(v)
case OpLsh32x8:
- return rewriteValueARM64_OpLsh32x8(v, config)
+ return rewriteValueARM64_OpLsh32x8(v)
case OpLsh64x16:
- return rewriteValueARM64_OpLsh64x16(v, config)
+ return rewriteValueARM64_OpLsh64x16(v)
case OpLsh64x32:
- return rewriteValueARM64_OpLsh64x32(v, config)
+ return rewriteValueARM64_OpLsh64x32(v)
case OpLsh64x64:
- return rewriteValueARM64_OpLsh64x64(v, config)
+ return rewriteValueARM64_OpLsh64x64(v)
case OpLsh64x8:
- return rewriteValueARM64_OpLsh64x8(v, config)
+ return rewriteValueARM64_OpLsh64x8(v)
case OpLsh8x16:
- return rewriteValueARM64_OpLsh8x16(v, config)
+ return rewriteValueARM64_OpLsh8x16(v)
case OpLsh8x32:
- return rewriteValueARM64_OpLsh8x32(v, config)
+ return rewriteValueARM64_OpLsh8x32(v)
case OpLsh8x64:
- return rewriteValueARM64_OpLsh8x64(v, config)
+ return rewriteValueARM64_OpLsh8x64(v)
case OpLsh8x8:
- return rewriteValueARM64_OpLsh8x8(v, config)
+ return rewriteValueARM64_OpLsh8x8(v)
case OpMod16:
- return rewriteValueARM64_OpMod16(v, config)
+ return rewriteValueARM64_OpMod16(v)
case OpMod16u:
- return rewriteValueARM64_OpMod16u(v, config)
+ return rewriteValueARM64_OpMod16u(v)
case OpMod32:
- return rewriteValueARM64_OpMod32(v, config)
+ return rewriteValueARM64_OpMod32(v)
case OpMod32u:
- return rewriteValueARM64_OpMod32u(v, config)
+ return rewriteValueARM64_OpMod32u(v)
case OpMod64:
- return rewriteValueARM64_OpMod64(v, config)
+ return rewriteValueARM64_OpMod64(v)
case OpMod64u:
- return rewriteValueARM64_OpMod64u(v, config)
+ return rewriteValueARM64_OpMod64u(v)
case OpMod8:
- return rewriteValueARM64_OpMod8(v, config)
+ return rewriteValueARM64_OpMod8(v)
case OpMod8u:
- return rewriteValueARM64_OpMod8u(v, config)
+ return rewriteValueARM64_OpMod8u(v)
case OpMove:
- return rewriteValueARM64_OpMove(v, config)
+ return rewriteValueARM64_OpMove(v)
case OpMul16:
- return rewriteValueARM64_OpMul16(v, config)
+ return rewriteValueARM64_OpMul16(v)
case OpMul32:
- return rewriteValueARM64_OpMul32(v, config)
+ return rewriteValueARM64_OpMul32(v)
case OpMul32F:
- return rewriteValueARM64_OpMul32F(v, config)
+ return rewriteValueARM64_OpMul32F(v)
case OpMul64:
- return rewriteValueARM64_OpMul64(v, config)
+ return rewriteValueARM64_OpMul64(v)
case OpMul64F:
- return rewriteValueARM64_OpMul64F(v, config)
+ return rewriteValueARM64_OpMul64F(v)
case OpMul8:
- return rewriteValueARM64_OpMul8(v, config)
+ return rewriteValueARM64_OpMul8(v)
case OpNeg16:
- return rewriteValueARM64_OpNeg16(v, config)
+ return rewriteValueARM64_OpNeg16(v)
case OpNeg32:
- return rewriteValueARM64_OpNeg32(v, config)
+ return rewriteValueARM64_OpNeg32(v)
case OpNeg32F:
- return rewriteValueARM64_OpNeg32F(v, config)
+ return rewriteValueARM64_OpNeg32F(v)
case OpNeg64:
- return rewriteValueARM64_OpNeg64(v, config)
+ return rewriteValueARM64_OpNeg64(v)
case OpNeg64F:
- return rewriteValueARM64_OpNeg64F(v, config)
+ return rewriteValueARM64_OpNeg64F(v)
case OpNeg8:
- return rewriteValueARM64_OpNeg8(v, config)
+ return rewriteValueARM64_OpNeg8(v)
case OpNeq16:
- return rewriteValueARM64_OpNeq16(v, config)
+ return rewriteValueARM64_OpNeq16(v)
case OpNeq32:
- return rewriteValueARM64_OpNeq32(v, config)
+ return rewriteValueARM64_OpNeq32(v)
case OpNeq32F:
- return rewriteValueARM64_OpNeq32F(v, config)
+ return rewriteValueARM64_OpNeq32F(v)
case OpNeq64:
- return rewriteValueARM64_OpNeq64(v, config)
+ return rewriteValueARM64_OpNeq64(v)
case OpNeq64F:
- return rewriteValueARM64_OpNeq64F(v, config)
+ return rewriteValueARM64_OpNeq64F(v)
case OpNeq8:
- return rewriteValueARM64_OpNeq8(v, config)
+ return rewriteValueARM64_OpNeq8(v)
case OpNeqB:
- return rewriteValueARM64_OpNeqB(v, config)
+ return rewriteValueARM64_OpNeqB(v)
case OpNeqPtr:
- return rewriteValueARM64_OpNeqPtr(v, config)
+ return rewriteValueARM64_OpNeqPtr(v)
case OpNilCheck:
- return rewriteValueARM64_OpNilCheck(v, config)
+ return rewriteValueARM64_OpNilCheck(v)
case OpNot:
- return rewriteValueARM64_OpNot(v, config)
+ return rewriteValueARM64_OpNot(v)
case OpOffPtr:
- return rewriteValueARM64_OpOffPtr(v, config)
+ return rewriteValueARM64_OpOffPtr(v)
case OpOr16:
- return rewriteValueARM64_OpOr16(v, config)
+ return rewriteValueARM64_OpOr16(v)
case OpOr32:
- return rewriteValueARM64_OpOr32(v, config)
+ return rewriteValueARM64_OpOr32(v)
case OpOr64:
- return rewriteValueARM64_OpOr64(v, config)
+ return rewriteValueARM64_OpOr64(v)
case OpOr8:
- return rewriteValueARM64_OpOr8(v, config)
+ return rewriteValueARM64_OpOr8(v)
case OpOrB:
- return rewriteValueARM64_OpOrB(v, config)
+ return rewriteValueARM64_OpOrB(v)
case OpRound32F:
- return rewriteValueARM64_OpRound32F(v, config)
+ return rewriteValueARM64_OpRound32F(v)
case OpRound64F:
- return rewriteValueARM64_OpRound64F(v, config)
+ return rewriteValueARM64_OpRound64F(v)
case OpRsh16Ux16:
- return rewriteValueARM64_OpRsh16Ux16(v, config)
+ return rewriteValueARM64_OpRsh16Ux16(v)
case OpRsh16Ux32:
- return rewriteValueARM64_OpRsh16Ux32(v, config)
+ return rewriteValueARM64_OpRsh16Ux32(v)
case OpRsh16Ux64:
- return rewriteValueARM64_OpRsh16Ux64(v, config)
+ return rewriteValueARM64_OpRsh16Ux64(v)
case OpRsh16Ux8:
- return rewriteValueARM64_OpRsh16Ux8(v, config)
+ return rewriteValueARM64_OpRsh16Ux8(v)
case OpRsh16x16:
- return rewriteValueARM64_OpRsh16x16(v, config)
+ return rewriteValueARM64_OpRsh16x16(v)
case OpRsh16x32:
- return rewriteValueARM64_OpRsh16x32(v, config)
+ return rewriteValueARM64_OpRsh16x32(v)
case OpRsh16x64:
- return rewriteValueARM64_OpRsh16x64(v, config)
+ return rewriteValueARM64_OpRsh16x64(v)
case OpRsh16x8:
- return rewriteValueARM64_OpRsh16x8(v, config)
+ return rewriteValueARM64_OpRsh16x8(v)
case OpRsh32Ux16:
- return rewriteValueARM64_OpRsh32Ux16(v, config)
+ return rewriteValueARM64_OpRsh32Ux16(v)
case OpRsh32Ux32:
- return rewriteValueARM64_OpRsh32Ux32(v, config)
+ return rewriteValueARM64_OpRsh32Ux32(v)
case OpRsh32Ux64:
- return rewriteValueARM64_OpRsh32Ux64(v, config)
+ return rewriteValueARM64_OpRsh32Ux64(v)
case OpRsh32Ux8:
- return rewriteValueARM64_OpRsh32Ux8(v, config)
+ return rewriteValueARM64_OpRsh32Ux8(v)
case OpRsh32x16:
- return rewriteValueARM64_OpRsh32x16(v, config)
+ return rewriteValueARM64_OpRsh32x16(v)
case OpRsh32x32:
- return rewriteValueARM64_OpRsh32x32(v, config)
+ return rewriteValueARM64_OpRsh32x32(v)
case OpRsh32x64:
- return rewriteValueARM64_OpRsh32x64(v, config)
+ return rewriteValueARM64_OpRsh32x64(v)
case OpRsh32x8:
- return rewriteValueARM64_OpRsh32x8(v, config)
+ return rewriteValueARM64_OpRsh32x8(v)
case OpRsh64Ux16:
- return rewriteValueARM64_OpRsh64Ux16(v, config)
+ return rewriteValueARM64_OpRsh64Ux16(v)
case OpRsh64Ux32:
- return rewriteValueARM64_OpRsh64Ux32(v, config)
+ return rewriteValueARM64_OpRsh64Ux32(v)
case OpRsh64Ux64:
- return rewriteValueARM64_OpRsh64Ux64(v, config)
+ return rewriteValueARM64_OpRsh64Ux64(v)
case OpRsh64Ux8:
- return rewriteValueARM64_OpRsh64Ux8(v, config)
+ return rewriteValueARM64_OpRsh64Ux8(v)
case OpRsh64x16:
- return rewriteValueARM64_OpRsh64x16(v, config)
+ return rewriteValueARM64_OpRsh64x16(v)
case OpRsh64x32:
- return rewriteValueARM64_OpRsh64x32(v, config)
+ return rewriteValueARM64_OpRsh64x32(v)
case OpRsh64x64:
- return rewriteValueARM64_OpRsh64x64(v, config)
+ return rewriteValueARM64_OpRsh64x64(v)
case OpRsh64x8:
- return rewriteValueARM64_OpRsh64x8(v, config)
+ return rewriteValueARM64_OpRsh64x8(v)
case OpRsh8Ux16:
- return rewriteValueARM64_OpRsh8Ux16(v, config)
+ return rewriteValueARM64_OpRsh8Ux16(v)
case OpRsh8Ux32:
- return rewriteValueARM64_OpRsh8Ux32(v, config)
+ return rewriteValueARM64_OpRsh8Ux32(v)
case OpRsh8Ux64:
- return rewriteValueARM64_OpRsh8Ux64(v, config)
+ return rewriteValueARM64_OpRsh8Ux64(v)
case OpRsh8Ux8:
- return rewriteValueARM64_OpRsh8Ux8(v, config)
+ return rewriteValueARM64_OpRsh8Ux8(v)
case OpRsh8x16:
- return rewriteValueARM64_OpRsh8x16(v, config)
+ return rewriteValueARM64_OpRsh8x16(v)
case OpRsh8x32:
- return rewriteValueARM64_OpRsh8x32(v, config)
+ return rewriteValueARM64_OpRsh8x32(v)
case OpRsh8x64:
- return rewriteValueARM64_OpRsh8x64(v, config)
+ return rewriteValueARM64_OpRsh8x64(v)
case OpRsh8x8:
- return rewriteValueARM64_OpRsh8x8(v, config)
+ return rewriteValueARM64_OpRsh8x8(v)
case OpSignExt16to32:
- return rewriteValueARM64_OpSignExt16to32(v, config)
+ return rewriteValueARM64_OpSignExt16to32(v)
case OpSignExt16to64:
- return rewriteValueARM64_OpSignExt16to64(v, config)
+ return rewriteValueARM64_OpSignExt16to64(v)
case OpSignExt32to64:
- return rewriteValueARM64_OpSignExt32to64(v, config)
+ return rewriteValueARM64_OpSignExt32to64(v)
case OpSignExt8to16:
- return rewriteValueARM64_OpSignExt8to16(v, config)
+ return rewriteValueARM64_OpSignExt8to16(v)
case OpSignExt8to32:
- return rewriteValueARM64_OpSignExt8to32(v, config)
+ return rewriteValueARM64_OpSignExt8to32(v)
case OpSignExt8to64:
- return rewriteValueARM64_OpSignExt8to64(v, config)
+ return rewriteValueARM64_OpSignExt8to64(v)
case OpSlicemask:
- return rewriteValueARM64_OpSlicemask(v, config)
+ return rewriteValueARM64_OpSlicemask(v)
case OpSqrt:
- return rewriteValueARM64_OpSqrt(v, config)
+ return rewriteValueARM64_OpSqrt(v)
case OpStaticCall:
- return rewriteValueARM64_OpStaticCall(v, config)
+ return rewriteValueARM64_OpStaticCall(v)
case OpStore:
- return rewriteValueARM64_OpStore(v, config)
+ return rewriteValueARM64_OpStore(v)
case OpSub16:
- return rewriteValueARM64_OpSub16(v, config)
+ return rewriteValueARM64_OpSub16(v)
case OpSub32:
- return rewriteValueARM64_OpSub32(v, config)
+ return rewriteValueARM64_OpSub32(v)
case OpSub32F:
- return rewriteValueARM64_OpSub32F(v, config)
+ return rewriteValueARM64_OpSub32F(v)
case OpSub64:
- return rewriteValueARM64_OpSub64(v, config)
+ return rewriteValueARM64_OpSub64(v)
case OpSub64F:
- return rewriteValueARM64_OpSub64F(v, config)
+ return rewriteValueARM64_OpSub64F(v)
case OpSub8:
- return rewriteValueARM64_OpSub8(v, config)
+ return rewriteValueARM64_OpSub8(v)
case OpSubPtr:
- return rewriteValueARM64_OpSubPtr(v, config)
+ return rewriteValueARM64_OpSubPtr(v)
case OpTrunc16to8:
- return rewriteValueARM64_OpTrunc16to8(v, config)
+ return rewriteValueARM64_OpTrunc16to8(v)
case OpTrunc32to16:
- return rewriteValueARM64_OpTrunc32to16(v, config)
+ return rewriteValueARM64_OpTrunc32to16(v)
case OpTrunc32to8:
- return rewriteValueARM64_OpTrunc32to8(v, config)
+ return rewriteValueARM64_OpTrunc32to8(v)
case OpTrunc64to16:
- return rewriteValueARM64_OpTrunc64to16(v, config)
+ return rewriteValueARM64_OpTrunc64to16(v)
case OpTrunc64to32:
- return rewriteValueARM64_OpTrunc64to32(v, config)
+ return rewriteValueARM64_OpTrunc64to32(v)
case OpTrunc64to8:
- return rewriteValueARM64_OpTrunc64to8(v, config)
+ return rewriteValueARM64_OpTrunc64to8(v)
case OpXor16:
- return rewriteValueARM64_OpXor16(v, config)
+ return rewriteValueARM64_OpXor16(v)
case OpXor32:
- return rewriteValueARM64_OpXor32(v, config)
+ return rewriteValueARM64_OpXor32(v)
case OpXor64:
- return rewriteValueARM64_OpXor64(v, config)
+ return rewriteValueARM64_OpXor64(v)
case OpXor8:
- return rewriteValueARM64_OpXor8(v, config)
+ return rewriteValueARM64_OpXor8(v)
case OpZero:
- return rewriteValueARM64_OpZero(v, config)
+ return rewriteValueARM64_OpZero(v)
case OpZeroExt16to32:
- return rewriteValueARM64_OpZeroExt16to32(v, config)
+ return rewriteValueARM64_OpZeroExt16to32(v)
case OpZeroExt16to64:
- return rewriteValueARM64_OpZeroExt16to64(v, config)
+ return rewriteValueARM64_OpZeroExt16to64(v)
case OpZeroExt32to64:
- return rewriteValueARM64_OpZeroExt32to64(v, config)
+ return rewriteValueARM64_OpZeroExt32to64(v)
case OpZeroExt8to16:
- return rewriteValueARM64_OpZeroExt8to16(v, config)
+ return rewriteValueARM64_OpZeroExt8to16(v)
case OpZeroExt8to32:
- return rewriteValueARM64_OpZeroExt8to32(v, config)
+ return rewriteValueARM64_OpZeroExt8to32(v)
case OpZeroExt8to64:
- return rewriteValueARM64_OpZeroExt8to64(v, config)
+ return rewriteValueARM64_OpZeroExt8to64(v)
}
return false
}
-func rewriteValueARM64_OpARM64ADD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64ADD(v *Value) bool {
// match: (ADD (MOVDconst [c]) x)
// cond:
// result: (ADDconst [c] x)
}
return false
}
-func rewriteValueARM64_OpARM64ADDconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64ADDconst(v *Value) bool {
// match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr))
// cond:
// result: (MOVDaddr [off1+off2] {sym} ptr)
}
return false
}
-func rewriteValueARM64_OpARM64ADDshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (ADDshiftLL (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64ADDshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ADDshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (ADDshiftRA (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64ADDshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (ADDshiftRL (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64AND(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64AND(v *Value) bool {
// match: (AND (MOVDconst [c]) x)
// cond:
// result: (ANDconst [c] x)
}
return false
}
-func rewriteValueARM64_OpARM64ANDconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64ANDconst(v *Value) bool {
// match: (ANDconst [0] _)
// cond:
// result: (MOVDconst [0])
}
return false
}
-func rewriteValueARM64_OpARM64ANDshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ANDshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (ANDshiftLL (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64ANDshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ANDshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (ANDshiftRA (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64ANDshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ANDshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (ANDshiftRL (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64BIC(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64BIC(v *Value) bool {
// match: (BIC x (MOVDconst [c]))
// cond:
// result: (BICconst [c] x)
}
return false
}
-func rewriteValueARM64_OpARM64BICconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64BICconst(v *Value) bool {
// match: (BICconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueARM64_OpARM64BICshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64BICshiftLL(v *Value) bool {
// match: (BICshiftLL x (MOVDconst [c]) [d])
// cond:
// result: (BICconst x [int64(uint64(c)<<uint64(d))])
}
return false
}
-func rewriteValueARM64_OpARM64BICshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64BICshiftRA(v *Value) bool {
// match: (BICshiftRA x (MOVDconst [c]) [d])
// cond:
// result: (BICconst x [int64(int64(c)>>uint64(d))])
}
return false
}
-func rewriteValueARM64_OpARM64BICshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64BICshiftRL(v *Value) bool {
// match: (BICshiftRL x (MOVDconst [c]) [d])
// cond:
// result: (BICconst x [int64(uint64(c)>>uint64(d))])
}
return false
}
-func rewriteValueARM64_OpARM64CMP(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64CMP(v *Value) bool {
b := v.Block
_ = b
// match: (CMP x (MOVDconst [c]))
}
return false
}
-func rewriteValueARM64_OpARM64CMPW(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64CMPW(v *Value) bool {
b := v.Block
_ = b
// match: (CMPW x (MOVDconst [c]))
}
return false
}
-func rewriteValueARM64_OpARM64CMPWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64CMPWconst(v *Value) bool {
// match: (CMPWconst (MOVDconst [x]) [y])
// cond: int32(x)==int32(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValueARM64_OpARM64CMPconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64CMPconst(v *Value) bool {
// match: (CMPconst (MOVDconst [x]) [y])
// cond: x==y
// result: (FlagEQ)
}
return false
}
-func rewriteValueARM64_OpARM64CMPshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64CMPshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (CMPshiftLL (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64CMPshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64CMPshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (CMPshiftRA (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64CMPshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64CMPshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (CMPshiftRL (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64CSELULT(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64CSELULT(v *Value) bool {
// match: (CSELULT x (MOVDconst [0]) flag)
// cond:
// result: (CSELULT0 x flag)
}
return false
}
-func rewriteValueARM64_OpARM64CSELULT0(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64CSELULT0(v *Value) bool {
// match: (CSELULT0 _ (FlagEQ))
// cond:
// result: (MOVDconst [0])
}
return false
}
-func rewriteValueARM64_OpARM64DIV(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64DIV(v *Value) bool {
// match: (DIV (MOVDconst [c]) (MOVDconst [d]))
// cond:
// result: (MOVDconst [int64(c)/int64(d)])
}
return false
}
-func rewriteValueARM64_OpARM64DIVW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64DIVW(v *Value) bool {
// match: (DIVW (MOVDconst [c]) (MOVDconst [d]))
// cond:
// result: (MOVDconst [int64(int32(c)/int32(d))])
}
return false
}
-func rewriteValueARM64_OpARM64Equal(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64Equal(v *Value) bool {
// match: (Equal (FlagEQ))
// cond:
// result: (MOVDconst [1])
}
return false
}
-func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool {
// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (FMOVDload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool {
// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (FMOVDstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool {
// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (FMOVSload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool {
// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueARM64_OpARM64GreaterEqual(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64GreaterEqual(v *Value) bool {
// match: (GreaterEqual (FlagEQ))
// cond:
// result: (MOVDconst [1])
}
return false
}
-func rewriteValueARM64_OpARM64GreaterEqualU(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64GreaterEqualU(v *Value) bool {
// match: (GreaterEqualU (FlagEQ))
// cond:
// result: (MOVDconst [1])
}
return false
}
-func rewriteValueARM64_OpARM64GreaterThan(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64GreaterThan(v *Value) bool {
// match: (GreaterThan (FlagEQ))
// cond:
// result: (MOVDconst [0])
}
return false
}
-func rewriteValueARM64_OpARM64GreaterThanU(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64GreaterThanU(v *Value) bool {
// match: (GreaterThanU (FlagEQ))
// cond:
// result: (MOVDconst [0])
}
return false
}
-func rewriteValueARM64_OpARM64LessEqual(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64LessEqual(v *Value) bool {
// match: (LessEqual (FlagEQ))
// cond:
// result: (MOVDconst [1])
}
return false
}
-func rewriteValueARM64_OpARM64LessEqualU(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64LessEqualU(v *Value) bool {
// match: (LessEqualU (FlagEQ))
// cond:
// result: (MOVDconst [1])
}
return false
}
-func rewriteValueARM64_OpARM64LessThan(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64LessThan(v *Value) bool {
// match: (LessThan (FlagEQ))
// cond:
// result: (MOVDconst [0])
}
return false
}
-func rewriteValueARM64_OpARM64LessThanU(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64LessThanU(v *Value) bool {
// match: (LessThanU (FlagEQ))
// cond:
// result: (MOVDconst [0])
}
return false
}
-func rewriteValueARM64_OpARM64MOD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOD(v *Value) bool {
// match: (MOD (MOVDconst [c]) (MOVDconst [d]))
// cond:
// result: (MOVDconst [int64(c)%int64(d)])
}
return false
}
-func rewriteValueARM64_OpARM64MODW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MODW(v *Value) bool {
// match: (MODW (MOVDconst [c]) (MOVDconst [d]))
// cond:
// result: (MOVDconst [int64(int32(c)%int32(d))])
}
return false
}
-func rewriteValueARM64_OpARM64MOVBUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool {
// match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVBUload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVBUreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool {
// match: (MOVBUreg x:(MOVBUload _ _))
// cond:
// result: (MOVDreg x)
}
return false
}
-func rewriteValueARM64_OpARM64MOVBload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVBload(v *Value) bool {
// match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVBload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVBreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
// match: (MOVBreg x:(MOVBload _ _))
// cond:
// result: (MOVDreg x)
}
return false
}
-func rewriteValueARM64_OpARM64MOVBstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVBstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVBstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool {
// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVBstorezero [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVDload(v *Value) bool {
// match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVDload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVDreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVDreg(v *Value) bool {
// match: (MOVDreg x)
// cond: x.Uses == 1
// result: (MOVDnop x)
}
return false
}
-func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool {
// match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVDstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVDstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVDstorezero [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool {
// match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVHUload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVHUreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool {
// match: (MOVHUreg x:(MOVBUload _ _))
// cond:
// result: (MOVDreg x)
}
return false
}
-func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVHload(v *Value) bool {
// match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVHload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVHreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
// match: (MOVHreg x:(MOVBload _ _))
// cond:
// result: (MOVDreg x)
}
return false
}
-func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVHstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVHstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVHstorezero [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool {
// match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVWUload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVWUreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
// match: (MOVWUreg x:(MOVBUload _ _))
// cond:
// result: (MOVDreg x)
}
return false
}
-func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVWload(v *Value) bool {
// match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVWload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVWreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
// match: (MOVWreg x:(MOVBload _ _))
// cond:
// result: (MOVDreg x)
}
return false
}
-func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVWstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueARM64_OpARM64MOVWstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVWstorezero [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueARM64_OpARM64MUL(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MUL(v *Value) bool {
b := v.Block
_ = b
// match: (MUL x (MOVDconst [-1]))
}
return false
}
-func rewriteValueARM64_OpARM64MULW(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MULW(v *Value) bool {
b := v.Block
_ = b
// match: (MULW x (MOVDconst [c]))
}
return false
}
-func rewriteValueARM64_OpARM64MVN(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64MVN(v *Value) bool {
// match: (MVN (MOVDconst [c]))
// cond:
// result: (MOVDconst [^c])
}
return false
}
-func rewriteValueARM64_OpARM64NEG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64NEG(v *Value) bool {
// match: (NEG (MOVDconst [c]))
// cond:
// result: (MOVDconst [-c])
}
return false
}
-func rewriteValueARM64_OpARM64NotEqual(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64NotEqual(v *Value) bool {
// match: (NotEqual (FlagEQ))
// cond:
// result: (MOVDconst [0])
}
return false
}
-func rewriteValueARM64_OpARM64OR(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64OR(v *Value) bool {
b := v.Block
_ = b
// match: (OR (MOVDconst [c]) x)
}
return false
}
-func rewriteValueARM64_OpARM64ORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64ORconst(v *Value) bool {
// match: (ORconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueARM64_OpARM64ORshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (ORshiftLL (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64ORshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (ORshiftRA (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64ORshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (ORshiftRL (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64SLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64SLL(v *Value) bool {
// match: (SLL x (MOVDconst [c]))
// cond:
// result: (SLLconst x [c&63])
}
return false
}
-func rewriteValueARM64_OpARM64SLLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
// match: (SLLconst [c] (MOVDconst [d]))
// cond:
// result: (MOVDconst [int64(d)<<uint64(c)])
}
return false
}
-func rewriteValueARM64_OpARM64SRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64SRA(v *Value) bool {
// match: (SRA x (MOVDconst [c]))
// cond:
// result: (SRAconst x [c&63])
}
return false
}
-func rewriteValueARM64_OpARM64SRAconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
// match: (SRAconst [c] (MOVDconst [d]))
// cond:
// result: (MOVDconst [int64(d)>>uint64(c)])
}
return false
}
-func rewriteValueARM64_OpARM64SRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64SRL(v *Value) bool {
// match: (SRL x (MOVDconst [c]))
// cond:
// result: (SRLconst x [c&63])
}
return false
}
-func rewriteValueARM64_OpARM64SRLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
// match: (SRLconst [c] (MOVDconst [d]))
// cond:
// result: (MOVDconst [int64(uint64(d)>>uint64(c))])
}
return false
}
-func rewriteValueARM64_OpARM64SUB(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64SUB(v *Value) bool {
b := v.Block
_ = b
// match: (SUB x (MOVDconst [c]))
}
return false
}
-func rewriteValueARM64_OpARM64SUBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64SUBconst(v *Value) bool {
// match: (SUBconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueARM64_OpARM64SUBshiftLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64SUBshiftLL(v *Value) bool {
// match: (SUBshiftLL x (MOVDconst [c]) [d])
// cond:
// result: (SUBconst x [int64(uint64(c)<<uint64(d))])
}
return false
}
-func rewriteValueARM64_OpARM64SUBshiftRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64SUBshiftRA(v *Value) bool {
// match: (SUBshiftRA x (MOVDconst [c]) [d])
// cond:
// result: (SUBconst x [int64(int64(c)>>uint64(d))])
}
return false
}
-func rewriteValueARM64_OpARM64SUBshiftRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64SUBshiftRL(v *Value) bool {
// match: (SUBshiftRL x (MOVDconst [c]) [d])
// cond:
// result: (SUBconst x [int64(uint64(c)>>uint64(d))])
}
return false
}
-func rewriteValueARM64_OpARM64UDIV(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64UDIV(v *Value) bool {
// match: (UDIV x (MOVDconst [1]))
// cond:
// result: x
}
return false
}
-func rewriteValueARM64_OpARM64UDIVW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64UDIVW(v *Value) bool {
// match: (UDIVW x (MOVDconst [c]))
// cond: uint32(c)==1
// result: x
}
return false
}
-func rewriteValueARM64_OpARM64UMOD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64UMOD(v *Value) bool {
// match: (UMOD _ (MOVDconst [1]))
// cond:
// result: (MOVDconst [0])
}
return false
}
-func rewriteValueARM64_OpARM64UMODW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64UMODW(v *Value) bool {
// match: (UMODW _ (MOVDconst [c]))
// cond: uint32(c)==1
// result: (MOVDconst [0])
}
return false
}
-func rewriteValueARM64_OpARM64XOR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64XOR(v *Value) bool {
// match: (XOR (MOVDconst [c]) x)
// cond:
// result: (XORconst [c] x)
}
return false
}
-func rewriteValueARM64_OpARM64XORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpARM64XORconst(v *Value) bool {
// match: (XORconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueARM64_OpARM64XORshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
b := v.Block
_ = b
// match: (XORshiftLL (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64XORshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64XORshiftRA(v *Value) bool {
b := v.Block
_ = b
// match: (XORshiftRA (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpARM64XORshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
b := v.Block
_ = b
// match: (XORshiftRL (MOVDconst [c]) x [d])
}
return false
}
-func rewriteValueARM64_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAdd16(v *Value) bool {
// match: (Add16 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueARM64_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAdd32(v *Value) bool {
// match: (Add32 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueARM64_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAdd32F(v *Value) bool {
// match: (Add32F x y)
// cond:
// result: (FADDS x y)
return true
}
}
-func rewriteValueARM64_OpAdd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAdd64(v *Value) bool {
// match: (Add64 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueARM64_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAdd64F(v *Value) bool {
// match: (Add64F x y)
// cond:
// result: (FADDD x y)
return true
}
}
-func rewriteValueARM64_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAdd8(v *Value) bool {
// match: (Add8 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueARM64_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAddPtr(v *Value) bool {
// match: (AddPtr x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueARM64_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAddr(v *Value) bool {
// match: (Addr {sym} base)
// cond:
// result: (MOVDaddr {sym} base)
return true
}
}
-func rewriteValueARM64_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAnd16(v *Value) bool {
// match: (And16 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueARM64_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAnd32(v *Value) bool {
// match: (And32 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueARM64_OpAnd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAnd64(v *Value) bool {
// match: (And64 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueARM64_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAnd8(v *Value) bool {
// match: (And8 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueARM64_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAndB(v *Value) bool {
// match: (AndB x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueARM64_OpAtomicAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicAdd32(v *Value) bool {
// match: (AtomicAdd32 ptr val mem)
// cond:
// result: (LoweredAtomicAdd32 ptr val mem)
return true
}
}
-func rewriteValueARM64_OpAtomicAdd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicAdd64(v *Value) bool {
// match: (AtomicAdd64 ptr val mem)
// cond:
// result: (LoweredAtomicAdd64 ptr val mem)
return true
}
}
-func rewriteValueARM64_OpAtomicAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicAnd8(v *Value) bool {
// match: (AtomicAnd8 ptr val mem)
// cond:
// result: (LoweredAtomicAnd8 ptr val mem)
return true
}
}
-func rewriteValueARM64_OpAtomicCompareAndSwap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicCompareAndSwap32(v *Value) bool {
// match: (AtomicCompareAndSwap32 ptr old new_ mem)
// cond:
// result: (LoweredAtomicCas32 ptr old new_ mem)
return true
}
}
-func rewriteValueARM64_OpAtomicCompareAndSwap64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicCompareAndSwap64(v *Value) bool {
// match: (AtomicCompareAndSwap64 ptr old new_ mem)
// cond:
// result: (LoweredAtomicCas64 ptr old new_ mem)
return true
}
}
-func rewriteValueARM64_OpAtomicExchange32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicExchange32(v *Value) bool {
// match: (AtomicExchange32 ptr val mem)
// cond:
// result: (LoweredAtomicExchange32 ptr val mem)
return true
}
}
-func rewriteValueARM64_OpAtomicExchange64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicExchange64(v *Value) bool {
// match: (AtomicExchange64 ptr val mem)
// cond:
// result: (LoweredAtomicExchange64 ptr val mem)
return true
}
}
-func rewriteValueARM64_OpAtomicLoad32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicLoad32(v *Value) bool {
// match: (AtomicLoad32 ptr mem)
// cond:
// result: (LDARW ptr mem)
return true
}
}
-func rewriteValueARM64_OpAtomicLoad64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicLoad64(v *Value) bool {
// match: (AtomicLoad64 ptr mem)
// cond:
// result: (LDAR ptr mem)
return true
}
}
-func rewriteValueARM64_OpAtomicLoadPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicLoadPtr(v *Value) bool {
// match: (AtomicLoadPtr ptr mem)
// cond:
// result: (LDAR ptr mem)
return true
}
}
-func rewriteValueARM64_OpAtomicOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicOr8(v *Value) bool {
// match: (AtomicOr8 ptr val mem)
// cond:
// result: (LoweredAtomicOr8 ptr val mem)
return true
}
}
-func rewriteValueARM64_OpAtomicStore32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicStore32(v *Value) bool {
// match: (AtomicStore32 ptr val mem)
// cond:
// result: (STLRW ptr val mem)
return true
}
}
-func rewriteValueARM64_OpAtomicStore64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicStore64(v *Value) bool {
// match: (AtomicStore64 ptr val mem)
// cond:
// result: (STLR ptr val mem)
return true
}
}
-func rewriteValueARM64_OpAtomicStorePtrNoWB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpAtomicStorePtrNoWB(v *Value) bool {
// match: (AtomicStorePtrNoWB ptr val mem)
// cond:
// result: (STLR ptr val mem)
return true
}
}
-func rewriteValueARM64_OpAvg64u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAvg64u(v *Value) bool {
b := v.Block
_ = b
// match: (Avg64u <t> x y)
return true
}
}
-func rewriteValueARM64_OpBitLen64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpBitLen64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (BitLen64 x)
// cond:
- // result: (SUB (MOVDconst [64]) (CLZ <config.fe.TypeInt()> x))
+ // result: (SUB (MOVDconst [64]) (CLZ <fe.TypeInt()> x))
for {
x := v.Args[0]
v.reset(OpARM64SUB)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 64
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64CLZ, config.fe.TypeInt())
+ v1 := b.NewValue0(v.Pos, OpARM64CLZ, fe.TypeInt())
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM64_OpBitRev16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpBitRev16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (BitRev16 x)
// cond:
- // result: (SRLconst [48] (RBIT <config.fe.TypeUInt64()> x))
+ // result: (SRLconst [48] (RBIT <fe.TypeUInt64()> x))
for {
x := v.Args[0]
v.reset(OpARM64SRLconst)
v.AuxInt = 48
- v0 := b.NewValue0(v.Pos, OpARM64RBIT, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpBitRev32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpBitRev32(v *Value) bool {
// match: (BitRev32 x)
// cond:
// result: (RBITW x)
return true
}
}
-func rewriteValueARM64_OpBitRev64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpBitRev64(v *Value) bool {
// match: (BitRev64 x)
// cond:
// result: (RBIT x)
return true
}
}
-func rewriteValueARM64_OpBitRev8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpBitRev8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (BitRev8 x)
// cond:
- // result: (SRLconst [56] (RBIT <config.fe.TypeUInt64()> x))
+ // result: (SRLconst [56] (RBIT <fe.TypeUInt64()> x))
for {
x := v.Args[0]
v.reset(OpARM64SRLconst)
v.AuxInt = 56
- v0 := b.NewValue0(v.Pos, OpARM64RBIT, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpBswap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpBswap32(v *Value) bool {
// match: (Bswap32 x)
// cond:
// result: (REVW x)
return true
}
}
-func rewriteValueARM64_OpBswap64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpBswap64(v *Value) bool {
// match: (Bswap64 x)
// cond:
// result: (REV x)
return true
}
}
-func rewriteValueARM64_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpClosureCall(v *Value) bool {
// match: (ClosureCall [argwid] entry closure mem)
// cond:
// result: (CALLclosure [argwid] entry closure mem)
return true
}
}
-func rewriteValueARM64_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCom16(v *Value) bool {
// match: (Com16 x)
// cond:
// result: (MVN x)
return true
}
}
-func rewriteValueARM64_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCom32(v *Value) bool {
// match: (Com32 x)
// cond:
// result: (MVN x)
return true
}
}
-func rewriteValueARM64_OpCom64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCom64(v *Value) bool {
// match: (Com64 x)
// cond:
// result: (MVN x)
return true
}
}
-func rewriteValueARM64_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCom8(v *Value) bool {
// match: (Com8 x)
// cond:
// result: (MVN x)
return true
}
}
-func rewriteValueARM64_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpConst16(v *Value) bool {
// match: (Const16 [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValueARM64_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpConst32(v *Value) bool {
// match: (Const32 [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValueARM64_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpConst32F(v *Value) bool {
// match: (Const32F [val])
// cond:
// result: (FMOVSconst [val])
return true
}
}
-func rewriteValueARM64_OpConst64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpConst64(v *Value) bool {
// match: (Const64 [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValueARM64_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpConst64F(v *Value) bool {
// match: (Const64F [val])
// cond:
// result: (FMOVDconst [val])
return true
}
}
-func rewriteValueARM64_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpConst8(v *Value) bool {
// match: (Const8 [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValueARM64_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpConstBool(v *Value) bool {
// match: (ConstBool [b])
// cond:
// result: (MOVDconst [b])
return true
}
}
-func rewriteValueARM64_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpConstNil(v *Value) bool {
// match: (ConstNil)
// cond:
// result: (MOVDconst [0])
return true
}
}
-func rewriteValueARM64_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpConvert(v *Value) bool {
// match: (Convert x mem)
// cond:
// result: (MOVDconvert x mem)
return true
}
}
-func rewriteValueARM64_OpCtz32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCtz32(v *Value) bool {
b := v.Block
_ = b
// match: (Ctz32 <t> x)
return true
}
}
-func rewriteValueARM64_OpCtz64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCtz64(v *Value) bool {
b := v.Block
_ = b
// match: (Ctz64 <t> x)
return true
}
}
-func rewriteValueARM64_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt32Fto32(v *Value) bool {
// match: (Cvt32Fto32 x)
// cond:
// result: (FCVTZSSW x)
return true
}
}
-func rewriteValueARM64_OpCvt32Fto32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt32Fto32U(v *Value) bool {
// match: (Cvt32Fto32U x)
// cond:
// result: (FCVTZUSW x)
return true
}
}
-func rewriteValueARM64_OpCvt32Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt32Fto64(v *Value) bool {
// match: (Cvt32Fto64 x)
// cond:
// result: (FCVTZSS x)
return true
}
}
-func rewriteValueARM64_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt32Fto64F(v *Value) bool {
// match: (Cvt32Fto64F x)
// cond:
// result: (FCVTSD x)
return true
}
}
-func rewriteValueARM64_OpCvt32Fto64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt32Fto64U(v *Value) bool {
// match: (Cvt32Fto64U x)
// cond:
// result: (FCVTZUS x)
return true
}
}
-func rewriteValueARM64_OpCvt32Uto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt32Uto32F(v *Value) bool {
// match: (Cvt32Uto32F x)
// cond:
// result: (UCVTFWS x)
return true
}
}
-func rewriteValueARM64_OpCvt32Uto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt32Uto64F(v *Value) bool {
// match: (Cvt32Uto64F x)
// cond:
// result: (UCVTFWD x)
return true
}
}
-func rewriteValueARM64_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt32to32F(v *Value) bool {
// match: (Cvt32to32F x)
// cond:
// result: (SCVTFWS x)
return true
}
}
-func rewriteValueARM64_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt32to64F(v *Value) bool {
// match: (Cvt32to64F x)
// cond:
// result: (SCVTFWD x)
return true
}
}
-func rewriteValueARM64_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt64Fto32(v *Value) bool {
// match: (Cvt64Fto32 x)
// cond:
// result: (FCVTZSDW x)
return true
}
}
-func rewriteValueARM64_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt64Fto32F(v *Value) bool {
// match: (Cvt64Fto32F x)
// cond:
// result: (FCVTDS x)
return true
}
}
-func rewriteValueARM64_OpCvt64Fto32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt64Fto32U(v *Value) bool {
// match: (Cvt64Fto32U x)
// cond:
// result: (FCVTZUDW x)
return true
}
}
-func rewriteValueARM64_OpCvt64Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt64Fto64(v *Value) bool {
// match: (Cvt64Fto64 x)
// cond:
// result: (FCVTZSD x)
return true
}
}
-func rewriteValueARM64_OpCvt64Fto64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt64Fto64U(v *Value) bool {
// match: (Cvt64Fto64U x)
// cond:
// result: (FCVTZUD x)
return true
}
}
-func rewriteValueARM64_OpCvt64Uto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt64Uto32F(v *Value) bool {
// match: (Cvt64Uto32F x)
// cond:
// result: (UCVTFS x)
return true
}
}
-func rewriteValueARM64_OpCvt64Uto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt64Uto64F(v *Value) bool {
// match: (Cvt64Uto64F x)
// cond:
// result: (UCVTFD x)
return true
}
}
-func rewriteValueARM64_OpCvt64to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt64to32F(v *Value) bool {
// match: (Cvt64to32F x)
// cond:
// result: (SCVTFS x)
return true
}
}
-func rewriteValueARM64_OpCvt64to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpCvt64to64F(v *Value) bool {
// match: (Cvt64to64F x)
// cond:
// result: (SCVTFD x)
return true
}
}
-func rewriteValueARM64_OpDiv16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16 x y)
// cond:
// result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64DIVW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM64_OpDiv16u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16u x y)
// cond:
// result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64UDIVW)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM64_OpDiv32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpDiv32(v *Value) bool {
// match: (Div32 x y)
// cond:
// result: (DIVW x y)
return true
}
}
-func rewriteValueARM64_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpDiv32F(v *Value) bool {
// match: (Div32F x y)
// cond:
// result: (FDIVS x y)
return true
}
}
-func rewriteValueARM64_OpDiv32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpDiv32u(v *Value) bool {
// match: (Div32u x y)
// cond:
// result: (UDIVW x y)
return true
}
}
-func rewriteValueARM64_OpDiv64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpDiv64(v *Value) bool {
// match: (Div64 x y)
// cond:
// result: (DIV x y)
return true
}
}
-func rewriteValueARM64_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpDiv64F(v *Value) bool {
// match: (Div64F x y)
// cond:
// result: (FDIVD x y)
return true
}
}
-func rewriteValueARM64_OpDiv64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpDiv64u(v *Value) bool {
// match: (Div64u x y)
// cond:
// result: (UDIV x y)
return true
}
}
-func rewriteValueARM64_OpDiv8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8 x y)
// cond:
// result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64DIVW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM64_OpDiv8u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8u x y)
// cond:
// result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64UDIVW)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM64_OpEq16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq16 x y)
// cond:
// result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARM64Equal)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpEq32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq32(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32 x y)
return true
}
}
-func rewriteValueARM64_OpEq32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32F x y)
return true
}
}
-func rewriteValueARM64_OpEq64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq64(v *Value) bool {
b := v.Block
_ = b
// match: (Eq64 x y)
return true
}
}
-func rewriteValueARM64_OpEq64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq64F x y)
return true
}
}
-func rewriteValueARM64_OpEq8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq8 x y)
// cond:
// result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpARM64Equal)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpEqB(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEqB(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (EqB x y)
// cond:
- // result: (XOR (MOVDconst [1]) (XOR <config.fe.TypeBool()> x y))
+ // result: (XOR (MOVDconst [1]) (XOR <fe.TypeBool()> x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64XOR)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpARM64XOR, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpARM64XOR, fe.TypeBool())
v1.AddArg(x)
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM64_OpEqPtr(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEqPtr(v *Value) bool {
b := v.Block
_ = b
// match: (EqPtr x y)
return true
}
}
-func rewriteValueARM64_OpGeq16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq16 x y)
// cond:
// result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpARM64GreaterEqual)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpGeq16U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq16U x y)
// cond:
// result: (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARM64GreaterEqualU)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpGeq32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32 x y)
return true
}
}
-func rewriteValueARM64_OpGeq32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32F x y)
return true
}
}
-func rewriteValueARM64_OpGeq32U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq32U(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32U x y)
return true
}
}
-func rewriteValueARM64_OpGeq64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq64(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64 x y)
return true
}
}
-func rewriteValueARM64_OpGeq64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64F x y)
return true
}
}
-func rewriteValueARM64_OpGeq64U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq64U(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64U x y)
return true
}
}
-func rewriteValueARM64_OpGeq8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq8 x y)
// cond:
// result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpARM64GreaterEqual)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpGeq8U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq8U x y)
// cond:
// result: (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpARM64GreaterEqualU)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpGetClosurePtr(v *Value) bool {
// match: (GetClosurePtr)
// cond:
// result: (LoweredGetClosurePtr)
return true
}
}
-func rewriteValueARM64_OpGreater16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater16 x y)
// cond:
// result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpARM64GreaterThan)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpGreater16U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater16U x y)
// cond:
// result: (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARM64GreaterThanU)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpGreater32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater32(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32 x y)
return true
}
}
-func rewriteValueARM64_OpGreater32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater32F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32F x y)
return true
}
}
-func rewriteValueARM64_OpGreater32U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater32U(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32U x y)
return true
}
}
-func rewriteValueARM64_OpGreater64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater64(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64 x y)
return true
}
}
-func rewriteValueARM64_OpGreater64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater64F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64F x y)
return true
}
}
-func rewriteValueARM64_OpGreater64U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater64U(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64U x y)
return true
}
}
-func rewriteValueARM64_OpGreater8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater8 x y)
// cond:
// result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpARM64GreaterThan)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpGreater8U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater8U x y)
// cond:
// result: (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpARM64GreaterThanU)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpHmul32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Hmul32 x y)
// cond:
- // result: (SRAconst (MULL <config.fe.TypeInt64()> x y) [32])
+ // result: (SRAconst (MULL <fe.TypeInt64()> x y) [32])
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRAconst)
v.AuxInt = 32
- v0 := b.NewValue0(v.Pos, OpARM64MULL, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MULL, fe.TypeInt64())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpHmul32u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Hmul32u x y)
// cond:
- // result: (SRAconst (UMULL <config.fe.TypeUInt64()> x y) [32])
+ // result: (SRAconst (UMULL <fe.TypeUInt64()> x y) [32])
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRAconst)
v.AuxInt = 32
- v0 := b.NewValue0(v.Pos, OpARM64UMULL, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64UMULL, fe.TypeUInt64())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpHmul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpHmul64(v *Value) bool {
// match: (Hmul64 x y)
// cond:
// result: (MULH x y)
return true
}
}
-func rewriteValueARM64_OpHmul64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpHmul64u(v *Value) bool {
// match: (Hmul64u x y)
// cond:
// result: (UMULH x y)
return true
}
}
-func rewriteValueARM64_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpInterCall(v *Value) bool {
// match: (InterCall [argwid] entry mem)
// cond:
// result: (CALLinter [argwid] entry mem)
return true
}
}
-func rewriteValueARM64_OpIsInBounds(v *Value, config *Config) bool {
+func rewriteValueARM64_OpIsInBounds(v *Value) bool {
b := v.Block
_ = b
// match: (IsInBounds idx len)
return true
}
}
-func rewriteValueARM64_OpIsNonNil(v *Value, config *Config) bool {
+func rewriteValueARM64_OpIsNonNil(v *Value) bool {
b := v.Block
_ = b
// match: (IsNonNil ptr)
return true
}
}
-func rewriteValueARM64_OpIsSliceInBounds(v *Value, config *Config) bool {
+func rewriteValueARM64_OpIsSliceInBounds(v *Value) bool {
b := v.Block
_ = b
// match: (IsSliceInBounds idx len)
return true
}
}
-func rewriteValueARM64_OpLeq16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq16 x y)
// cond:
// result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpARM64LessEqual)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpLeq16U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLeq16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq16U x y)
// cond:
// result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARM64LessEqualU)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpLeq32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32 x y)
return true
}
}
-func rewriteValueARM64_OpLeq32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32F x y)
return true
}
}
-func rewriteValueARM64_OpLeq32U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLeq32U(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32U x y)
return true
}
}
-func rewriteValueARM64_OpLeq64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLeq64(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64 x y)
return true
}
}
-func rewriteValueARM64_OpLeq64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64F x y)
return true
}
}
-func rewriteValueARM64_OpLeq64U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLeq64U(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64U x y)
return true
}
}
-func rewriteValueARM64_OpLeq8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq8 x y)
// cond:
// result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpARM64LessEqual)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpLeq8U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLeq8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq8U x y)
// cond:
// result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpARM64LessEqualU)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpLess16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less16 x y)
// cond:
// result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpARM64LessThan)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpLess16U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less16U x y)
// cond:
// result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARM64LessThanU)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpLess32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess32(v *Value) bool {
b := v.Block
_ = b
// match: (Less32 x y)
return true
}
}
-func rewriteValueARM64_OpLess32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess32F(v *Value) bool {
b := v.Block
_ = b
// match: (Less32F x y)
return true
}
}
-func rewriteValueARM64_OpLess32U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess32U(v *Value) bool {
b := v.Block
_ = b
// match: (Less32U x y)
return true
}
}
-func rewriteValueARM64_OpLess64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess64(v *Value) bool {
b := v.Block
_ = b
// match: (Less64 x y)
return true
}
}
-func rewriteValueARM64_OpLess64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess64F(v *Value) bool {
b := v.Block
_ = b
// match: (Less64F x y)
return true
}
}
-func rewriteValueARM64_OpLess64U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess64U(v *Value) bool {
b := v.Block
_ = b
// match: (Less64U x y)
return true
}
}
-func rewriteValueARM64_OpLess8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less8 x y)
// cond:
// result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpARM64LessThan)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpLess8U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less8U x y)
// cond:
// result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpARM64LessThanU)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpLoad(v *Value) bool {
// match: (Load <t> ptr mem)
// cond: t.IsBoolean()
// result: (MOVBUload ptr mem)
}
return false
}
-func rewriteValueARM64_OpLsh16x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh16x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x16 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh16x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x32 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpLsh16x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh16x64(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x64 x (MOVDconst [c]))
return true
}
}
-func rewriteValueARM64_OpLsh16x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh16x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x8 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpLsh32x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh32x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x16 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpLsh32x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh32x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x32 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpLsh32x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh32x64(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x64 x (MOVDconst [c]))
return true
}
}
-func rewriteValueARM64_OpLsh32x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh32x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x8 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpLsh64x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh64x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x16 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpLsh64x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh64x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x32 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpLsh64x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh64x64(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x64 x (MOVDconst [c]))
return true
}
}
-func rewriteValueARM64_OpLsh64x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh64x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x8 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh8x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x16 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpLsh8x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh8x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x32 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpLsh8x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh8x64(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x64 x (MOVDconst [c]))
return true
}
}
-func rewriteValueARM64_OpLsh8x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh8x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x8 <t> x y)
// cond:
// result: (CSELULT (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpMod16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpMod16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16 x y)
// cond:
// result: (MODW (SignExt16to32 x) (SignExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64MODW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM64_OpMod16u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpMod16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16u x y)
// cond:
// result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64UMODW)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM64_OpMod32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpMod32(v *Value) bool {
// match: (Mod32 x y)
// cond:
// result: (MODW x y)
return true
}
}
-func rewriteValueARM64_OpMod32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpMod32u(v *Value) bool {
// match: (Mod32u x y)
// cond:
// result: (UMODW x y)
return true
}
}
-func rewriteValueARM64_OpMod64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpMod64(v *Value) bool {
// match: (Mod64 x y)
// cond:
// result: (MOD x y)
return true
}
}
-func rewriteValueARM64_OpMod64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpMod64u(v *Value) bool {
// match: (Mod64u x y)
// cond:
// result: (UMOD x y)
return true
}
}
-func rewriteValueARM64_OpMod8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpMod8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8 x y)
// cond:
// result: (MODW (SignExt8to32 x) (SignExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64MODW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM64_OpMod8u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpMod8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8u x y)
// cond:
// result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64UMODW)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueARM64_OpMove(v *Value, config *Config) bool {
+func rewriteValueARM64_OpMove(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Move [0] _ _ mem)
// cond:
// result: mem
mem := v.Args[2]
v.reset(OpARM64MOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, fe.TypeUInt8())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
mem := v.Args[2]
v.reset(OpARM64MOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, fe.TypeUInt16())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
mem := v.Args[2]
v.reset(OpARM64MOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, fe.TypeUInt32())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
mem := v.Args[2]
v.reset(OpARM64MOVDstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, fe.TypeUInt64())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpARM64MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, fe.TypeUInt8())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, fe.TypeUInt16())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpARM64MOVBstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, fe.TypeUInt8())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpARM64MOVHstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, fe.TypeUInt16())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpARM64MOVBstore)
v.AuxInt = 6
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, fe.TypeUInt8())
v0.AuxInt = 6
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, config.fe.TypeUInt16())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, fe.TypeUInt16())
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpARM64MOVWUload, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpARM64MOVWUload, fe.TypeUInt32())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v.reset(OpARM64MOVWstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, fe.TypeUInt32())
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDload, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, fe.TypeUInt64())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpARM64MOVDstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, fe.TypeUInt64())
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDload, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, fe.TypeUInt64())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpARM64MOVDstore)
v.AuxInt = 16
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, fe.TypeUInt64())
v0.AuxInt = 16
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
v1.AuxInt = 8
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDload, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, fe.TypeUInt64())
v2.AuxInt = 8
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpARM64MOVDload, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpARM64MOVDload, fe.TypeUInt64())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
}
return false
}
-func rewriteValueARM64_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpMul16(v *Value) bool {
// match: (Mul16 x y)
// cond:
// result: (MULW x y)
return true
}
}
-func rewriteValueARM64_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpMul32(v *Value) bool {
// match: (Mul32 x y)
// cond:
// result: (MULW x y)
return true
}
}
-func rewriteValueARM64_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpMul32F(v *Value) bool {
// match: (Mul32F x y)
// cond:
// result: (FMULS x y)
return true
}
}
-func rewriteValueARM64_OpMul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpMul64(v *Value) bool {
// match: (Mul64 x y)
// cond:
// result: (MUL x y)
return true
}
}
-func rewriteValueARM64_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpMul64F(v *Value) bool {
// match: (Mul64F x y)
// cond:
// result: (FMULD x y)
return true
}
}
-func rewriteValueARM64_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpMul8(v *Value) bool {
// match: (Mul8 x y)
// cond:
// result: (MULW x y)
return true
}
}
-func rewriteValueARM64_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpNeg16(v *Value) bool {
// match: (Neg16 x)
// cond:
// result: (NEG x)
return true
}
}
-func rewriteValueARM64_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpNeg32(v *Value) bool {
// match: (Neg32 x)
// cond:
// result: (NEG x)
return true
}
}
-func rewriteValueARM64_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpNeg32F(v *Value) bool {
// match: (Neg32F x)
// cond:
// result: (FNEGS x)
return true
}
}
-func rewriteValueARM64_OpNeg64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpNeg64(v *Value) bool {
// match: (Neg64 x)
// cond:
// result: (NEG x)
return true
}
}
-func rewriteValueARM64_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpNeg64F(v *Value) bool {
// match: (Neg64F x)
// cond:
// result: (FNEGD x)
return true
}
}
-func rewriteValueARM64_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpNeg8(v *Value) bool {
// match: (Neg8 x)
// cond:
// result: (NEG x)
return true
}
}
-func rewriteValueARM64_OpNeq16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpNeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq16 x y)
// cond:
// result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpARM64NotEqual)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpNeq32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpNeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32 x y)
return true
}
}
-func rewriteValueARM64_OpNeq32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpNeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32F x y)
return true
}
}
-func rewriteValueARM64_OpNeq64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpNeq64(v *Value) bool {
b := v.Block
_ = b
// match: (Neq64 x y)
return true
}
}
-func rewriteValueARM64_OpNeq64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpNeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq64F x y)
return true
}
}
-func rewriteValueARM64_OpNeq8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpNeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq8 x y)
// cond:
// result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpARM64NotEqual)
v0 := b.NewValue0(v.Pos, OpARM64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueARM64_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpNeqB(v *Value) bool {
// match: (NeqB x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueARM64_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValueARM64_OpNeqPtr(v *Value) bool {
b := v.Block
_ = b
// match: (NeqPtr x y)
return true
}
}
-func rewriteValueARM64_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpNilCheck(v *Value) bool {
// match: (NilCheck ptr mem)
// cond:
// result: (LoweredNilCheck ptr mem)
return true
}
}
-func rewriteValueARM64_OpNot(v *Value, config *Config) bool {
+func rewriteValueARM64_OpNot(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Not x)
// cond:
// result: (XOR (MOVDconst [1]) x)
for {
x := v.Args[0]
v.reset(OpARM64XOR)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
v.AddArg(x)
return true
}
}
-func rewriteValueARM64_OpOffPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpOffPtr(v *Value) bool {
// match: (OffPtr [off] ptr:(SP))
// cond:
// result: (MOVDaddr [off] ptr)
return true
}
}
-func rewriteValueARM64_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpOr16(v *Value) bool {
// match: (Or16 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueARM64_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpOr32(v *Value) bool {
// match: (Or32 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueARM64_OpOr64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpOr64(v *Value) bool {
// match: (Or64 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueARM64_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpOr8(v *Value) bool {
// match: (Or8 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueARM64_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpOrB(v *Value) bool {
// match: (OrB x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueARM64_OpRound32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpRound32F(v *Value) bool {
// match: (Round32F x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM64_OpRound64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpRound64F(v *Value) bool {
// match: (Round64F x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM64_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh16Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux16 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh16Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux32 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh16Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux64 x (MOVDconst [c]))
// cond: uint64(c) < 16
// result: (SRLconst (ZeroExt16to64 x) [c])
}
v.reset(OpARM64SRLconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
return true
}
}
-func rewriteValueARM64_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh16Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux8 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh16x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x16 x y)
// cond:
// result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v1.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh16x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x32 x y)
// cond:
// result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v1.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh16x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x64 x (MOVDconst [c]))
// cond: uint64(c) < 16
// result: (SRAconst (SignExt16to64 x) [c])
}
v.reset(OpARM64SRAconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpARM64SRAconst)
v.AuxInt = 63
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
return true
}
}
-func rewriteValueARM64_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh16x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x8 x y)
// cond:
// result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v1.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh32Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux16 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh32Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux32 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh32Ux64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh32Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux64 x (MOVDconst [c]))
// cond: uint64(c) < 32
// result: (SRLconst (ZeroExt32to64 x) [c])
}
v.reset(OpARM64SRLconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
return true
}
}
-func rewriteValueARM64_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh32Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux8 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh32x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x16 x y)
// cond:
// result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v1.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh32x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x32 x y)
// cond:
// result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v1.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh32x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh32x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x64 x (MOVDconst [c]))
// cond: uint64(c) < 32
// result: (SRAconst (SignExt32to64 x) [c])
}
v.reset(OpARM64SRAconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpARM64SRAconst)
v.AuxInt = 63
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
return true
}
}
-func rewriteValueARM64_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh32x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x8 x y)
// cond:
// result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v1.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh64Ux16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh64Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux16 <t> x y)
// cond:
// result: (CSELULT (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpRsh64Ux32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh64Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux32 <t> x y)
// cond:
// result: (CSELULT (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpRsh64Ux64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh64Ux64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux64 x (MOVDconst [c]))
return true
}
}
-func rewriteValueARM64_OpRsh64Ux8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh64Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux8 <t> x y)
// cond:
// result: (CSELULT (SRL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpRsh64x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh64x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x16 x y)
// cond:
// result: (SRA x (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
v.reset(OpARM64SRA)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
v0.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpRsh64x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh64x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x32 x y)
// cond:
// result: (SRA x (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
v.reset(OpARM64SRA)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
v0.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpRsh64x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh64x64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x64 x (MOVDconst [c]))
return true
}
}
-func rewriteValueARM64_OpRsh64x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh64x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x8 x y)
// cond:
// result: (SRA x (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
v.reset(OpARM64SRA)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(y)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
v0.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v3.AuxInt = 64
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(v3)
return true
}
}
-func rewriteValueARM64_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh8Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux16 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh8Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux32 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh8Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux64 x (MOVDconst [c]))
// cond: uint64(c) < 8
// result: (SRLconst (ZeroExt8to64 x) [c])
}
v.reset(OpARM64SRLconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
return true
}
}
-func rewriteValueARM64_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh8Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux8 <t> x y)
// cond:
// result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
y := v.Args[1]
v.reset(OpARM64CSELULT)
v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
v.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh8x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x16 x y)
// cond:
// result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v1.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh8x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x32 x y)
// cond:
// result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v1.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh8x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x64 x (MOVDconst [c]))
// cond: uint64(c) < 8
// result: (SRAconst (SignExt8to64 x) [c])
}
v.reset(OpARM64SRAconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpARM64SRAconst)
v.AuxInt = 63
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
return true
}
}
-func rewriteValueARM64_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpRsh8x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x8 x y)
// cond:
// result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpARM64SRA)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v2.AddArg(y)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
v1.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpARM64CMPconst, TypeFlags)
v4.AuxInt = 64
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueARM64_OpSignExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSignExt16to32(v *Value) bool {
// match: (SignExt16to32 x)
// cond:
// result: (MOVHreg x)
return true
}
}
-func rewriteValueARM64_OpSignExt16to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSignExt16to64(v *Value) bool {
// match: (SignExt16to64 x)
// cond:
// result: (MOVHreg x)
return true
}
}
-func rewriteValueARM64_OpSignExt32to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSignExt32to64(v *Value) bool {
// match: (SignExt32to64 x)
// cond:
// result: (MOVWreg x)
return true
}
}
-func rewriteValueARM64_OpSignExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSignExt8to16(v *Value) bool {
// match: (SignExt8to16 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueARM64_OpSignExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSignExt8to32(v *Value) bool {
// match: (SignExt8to32 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueARM64_OpSignExt8to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSignExt8to64(v *Value) bool {
// match: (SignExt8to64 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueARM64_OpSlicemask(v *Value, config *Config) bool {
+func rewriteValueARM64_OpSlicemask(v *Value) bool {
b := v.Block
_ = b
// match: (Slicemask <t> x)
return true
}
}
-func rewriteValueARM64_OpSqrt(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSqrt(v *Value) bool {
// match: (Sqrt x)
// cond:
// result: (FSQRTD x)
return true
}
}
-func rewriteValueARM64_OpStaticCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpStaticCall(v *Value) bool {
// match: (StaticCall [argwid] {target} mem)
// cond:
// result: (CALLstatic [argwid] {target} mem)
return true
}
}
-func rewriteValueARM64_OpStore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpStore(v *Value) bool {
// match: (Store {t} ptr val mem)
// cond: t.(Type).Size() == 1
// result: (MOVBstore ptr val mem)
}
return false
}
-func rewriteValueARM64_OpSub16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSub16(v *Value) bool {
// match: (Sub16 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueARM64_OpSub32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSub32(v *Value) bool {
// match: (Sub32 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueARM64_OpSub32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSub32F(v *Value) bool {
// match: (Sub32F x y)
// cond:
// result: (FSUBS x y)
return true
}
}
-func rewriteValueARM64_OpSub64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSub64(v *Value) bool {
// match: (Sub64 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueARM64_OpSub64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSub64F(v *Value) bool {
// match: (Sub64F x y)
// cond:
// result: (FSUBD x y)
return true
}
}
-func rewriteValueARM64_OpSub8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSub8(v *Value) bool {
// match: (Sub8 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueARM64_OpSubPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpSubPtr(v *Value) bool {
// match: (SubPtr x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueARM64_OpTrunc16to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpTrunc16to8(v *Value) bool {
// match: (Trunc16to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM64_OpTrunc32to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpTrunc32to16(v *Value) bool {
// match: (Trunc32to16 x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM64_OpTrunc32to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpTrunc32to8(v *Value) bool {
// match: (Trunc32to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM64_OpTrunc64to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpTrunc64to16(v *Value) bool {
// match: (Trunc64to16 x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM64_OpTrunc64to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpTrunc64to32(v *Value) bool {
// match: (Trunc64to32 x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM64_OpTrunc64to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpTrunc64to8(v *Value) bool {
// match: (Trunc64to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueARM64_OpXor16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpXor16(v *Value) bool {
// match: (Xor16 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueARM64_OpXor32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpXor32(v *Value) bool {
// match: (Xor32 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueARM64_OpXor64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpXor64(v *Value) bool {
// match: (Xor64 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueARM64_OpXor8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpXor8(v *Value) bool {
// match: (Xor8 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueARM64_OpZero(v *Value, config *Config) bool {
+func rewriteValueARM64_OpZero(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Zero [0] _ mem)
// cond:
// result: mem
mem := v.Args[1]
v.reset(OpARM64MOVBstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
mem := v.Args[1]
v.reset(OpARM64MOVHstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
mem := v.Args[1]
v.reset(OpARM64MOVWstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
mem := v.Args[1]
v.reset(OpARM64MOVDstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
v.reset(OpARM64MOVBstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, TypeMem)
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
v.reset(OpARM64MOVBstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, TypeMem)
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
v.reset(OpARM64MOVHstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, TypeMem)
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
v.reset(OpARM64MOVBstore)
v.AuxInt = 6
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, TypeMem)
v1.AuxInt = 4
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, TypeMem)
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
v.reset(OpARM64MOVWstore)
v.AuxInt = 8
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
v.reset(OpARM64MOVDstore)
v.AuxInt = 8
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
v.reset(OpARM64MOVDstore)
v.AuxInt = 16
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
v1.AuxInt = 8
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, TypeMem)
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, fe.TypeUInt64())
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
}
return false
}
-func rewriteValueARM64_OpZeroExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpZeroExt16to32(v *Value) bool {
// match: (ZeroExt16to32 x)
// cond:
// result: (MOVHUreg x)
return true
}
}
-func rewriteValueARM64_OpZeroExt16to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpZeroExt16to64(v *Value) bool {
// match: (ZeroExt16to64 x)
// cond:
// result: (MOVHUreg x)
return true
}
}
-func rewriteValueARM64_OpZeroExt32to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpZeroExt32to64(v *Value) bool {
// match: (ZeroExt32to64 x)
// cond:
// result: (MOVWUreg x)
return true
}
}
-func rewriteValueARM64_OpZeroExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpZeroExt8to16(v *Value) bool {
// match: (ZeroExt8to16 x)
// cond:
// result: (MOVBUreg x)
return true
}
}
-func rewriteValueARM64_OpZeroExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpZeroExt8to32(v *Value) bool {
// match: (ZeroExt8to32 x)
// cond:
// result: (MOVBUreg x)
return true
}
}
-func rewriteValueARM64_OpZeroExt8to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueARM64_OpZeroExt8to64(v *Value) bool {
// match: (ZeroExt8to64 x)
// cond:
// result: (MOVBUreg x)
return true
}
}
-func rewriteBlockARM64(b *Block, config *Config) bool {
+func rewriteBlockARM64(b *Block) bool {
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
switch b.Kind {
case BlockARM64EQ:
// match: (EQ (CMPconst [0] x) yes no)
import "math"
var _ = math.MinInt8 // in case not otherwise used
-func rewriteValueMIPS(v *Value, config *Config) bool {
+func rewriteValueMIPS(v *Value) bool {
switch v.Op {
case OpAdd16:
- return rewriteValueMIPS_OpAdd16(v, config)
+ return rewriteValueMIPS_OpAdd16(v)
case OpAdd32:
- return rewriteValueMIPS_OpAdd32(v, config)
+ return rewriteValueMIPS_OpAdd32(v)
case OpAdd32F:
- return rewriteValueMIPS_OpAdd32F(v, config)
+ return rewriteValueMIPS_OpAdd32F(v)
case OpAdd32withcarry:
- return rewriteValueMIPS_OpAdd32withcarry(v, config)
+ return rewriteValueMIPS_OpAdd32withcarry(v)
case OpAdd64F:
- return rewriteValueMIPS_OpAdd64F(v, config)
+ return rewriteValueMIPS_OpAdd64F(v)
case OpAdd8:
- return rewriteValueMIPS_OpAdd8(v, config)
+ return rewriteValueMIPS_OpAdd8(v)
case OpAddPtr:
- return rewriteValueMIPS_OpAddPtr(v, config)
+ return rewriteValueMIPS_OpAddPtr(v)
case OpAddr:
- return rewriteValueMIPS_OpAddr(v, config)
+ return rewriteValueMIPS_OpAddr(v)
case OpAnd16:
- return rewriteValueMIPS_OpAnd16(v, config)
+ return rewriteValueMIPS_OpAnd16(v)
case OpAnd32:
- return rewriteValueMIPS_OpAnd32(v, config)
+ return rewriteValueMIPS_OpAnd32(v)
case OpAnd8:
- return rewriteValueMIPS_OpAnd8(v, config)
+ return rewriteValueMIPS_OpAnd8(v)
case OpAndB:
- return rewriteValueMIPS_OpAndB(v, config)
+ return rewriteValueMIPS_OpAndB(v)
case OpAtomicAdd32:
- return rewriteValueMIPS_OpAtomicAdd32(v, config)
+ return rewriteValueMIPS_OpAtomicAdd32(v)
case OpAtomicAnd8:
- return rewriteValueMIPS_OpAtomicAnd8(v, config)
+ return rewriteValueMIPS_OpAtomicAnd8(v)
case OpAtomicCompareAndSwap32:
- return rewriteValueMIPS_OpAtomicCompareAndSwap32(v, config)
+ return rewriteValueMIPS_OpAtomicCompareAndSwap32(v)
case OpAtomicExchange32:
- return rewriteValueMIPS_OpAtomicExchange32(v, config)
+ return rewriteValueMIPS_OpAtomicExchange32(v)
case OpAtomicLoad32:
- return rewriteValueMIPS_OpAtomicLoad32(v, config)
+ return rewriteValueMIPS_OpAtomicLoad32(v)
case OpAtomicLoadPtr:
- return rewriteValueMIPS_OpAtomicLoadPtr(v, config)
+ return rewriteValueMIPS_OpAtomicLoadPtr(v)
case OpAtomicOr8:
- return rewriteValueMIPS_OpAtomicOr8(v, config)
+ return rewriteValueMIPS_OpAtomicOr8(v)
case OpAtomicStore32:
- return rewriteValueMIPS_OpAtomicStore32(v, config)
+ return rewriteValueMIPS_OpAtomicStore32(v)
case OpAtomicStorePtrNoWB:
- return rewriteValueMIPS_OpAtomicStorePtrNoWB(v, config)
+ return rewriteValueMIPS_OpAtomicStorePtrNoWB(v)
case OpAvg32u:
- return rewriteValueMIPS_OpAvg32u(v, config)
+ return rewriteValueMIPS_OpAvg32u(v)
case OpBitLen32:
- return rewriteValueMIPS_OpBitLen32(v, config)
+ return rewriteValueMIPS_OpBitLen32(v)
case OpClosureCall:
- return rewriteValueMIPS_OpClosureCall(v, config)
+ return rewriteValueMIPS_OpClosureCall(v)
case OpCom16:
- return rewriteValueMIPS_OpCom16(v, config)
+ return rewriteValueMIPS_OpCom16(v)
case OpCom32:
- return rewriteValueMIPS_OpCom32(v, config)
+ return rewriteValueMIPS_OpCom32(v)
case OpCom8:
- return rewriteValueMIPS_OpCom8(v, config)
+ return rewriteValueMIPS_OpCom8(v)
case OpConst16:
- return rewriteValueMIPS_OpConst16(v, config)
+ return rewriteValueMIPS_OpConst16(v)
case OpConst32:
- return rewriteValueMIPS_OpConst32(v, config)
+ return rewriteValueMIPS_OpConst32(v)
case OpConst32F:
- return rewriteValueMIPS_OpConst32F(v, config)
+ return rewriteValueMIPS_OpConst32F(v)
case OpConst64F:
- return rewriteValueMIPS_OpConst64F(v, config)
+ return rewriteValueMIPS_OpConst64F(v)
case OpConst8:
- return rewriteValueMIPS_OpConst8(v, config)
+ return rewriteValueMIPS_OpConst8(v)
case OpConstBool:
- return rewriteValueMIPS_OpConstBool(v, config)
+ return rewriteValueMIPS_OpConstBool(v)
case OpConstNil:
- return rewriteValueMIPS_OpConstNil(v, config)
+ return rewriteValueMIPS_OpConstNil(v)
case OpConvert:
- return rewriteValueMIPS_OpConvert(v, config)
+ return rewriteValueMIPS_OpConvert(v)
case OpCtz32:
- return rewriteValueMIPS_OpCtz32(v, config)
+ return rewriteValueMIPS_OpCtz32(v)
case OpCvt32Fto32:
- return rewriteValueMIPS_OpCvt32Fto32(v, config)
+ return rewriteValueMIPS_OpCvt32Fto32(v)
case OpCvt32Fto64F:
- return rewriteValueMIPS_OpCvt32Fto64F(v, config)
+ return rewriteValueMIPS_OpCvt32Fto64F(v)
case OpCvt32to32F:
- return rewriteValueMIPS_OpCvt32to32F(v, config)
+ return rewriteValueMIPS_OpCvt32to32F(v)
case OpCvt32to64F:
- return rewriteValueMIPS_OpCvt32to64F(v, config)
+ return rewriteValueMIPS_OpCvt32to64F(v)
case OpCvt64Fto32:
- return rewriteValueMIPS_OpCvt64Fto32(v, config)
+ return rewriteValueMIPS_OpCvt64Fto32(v)
case OpCvt64Fto32F:
- return rewriteValueMIPS_OpCvt64Fto32F(v, config)
+ return rewriteValueMIPS_OpCvt64Fto32F(v)
case OpDiv16:
- return rewriteValueMIPS_OpDiv16(v, config)
+ return rewriteValueMIPS_OpDiv16(v)
case OpDiv16u:
- return rewriteValueMIPS_OpDiv16u(v, config)
+ return rewriteValueMIPS_OpDiv16u(v)
case OpDiv32:
- return rewriteValueMIPS_OpDiv32(v, config)
+ return rewriteValueMIPS_OpDiv32(v)
case OpDiv32F:
- return rewriteValueMIPS_OpDiv32F(v, config)
+ return rewriteValueMIPS_OpDiv32F(v)
case OpDiv32u:
- return rewriteValueMIPS_OpDiv32u(v, config)
+ return rewriteValueMIPS_OpDiv32u(v)
case OpDiv64F:
- return rewriteValueMIPS_OpDiv64F(v, config)
+ return rewriteValueMIPS_OpDiv64F(v)
case OpDiv8:
- return rewriteValueMIPS_OpDiv8(v, config)
+ return rewriteValueMIPS_OpDiv8(v)
case OpDiv8u:
- return rewriteValueMIPS_OpDiv8u(v, config)
+ return rewriteValueMIPS_OpDiv8u(v)
case OpEq16:
- return rewriteValueMIPS_OpEq16(v, config)
+ return rewriteValueMIPS_OpEq16(v)
case OpEq32:
- return rewriteValueMIPS_OpEq32(v, config)
+ return rewriteValueMIPS_OpEq32(v)
case OpEq32F:
- return rewriteValueMIPS_OpEq32F(v, config)
+ return rewriteValueMIPS_OpEq32F(v)
case OpEq64F:
- return rewriteValueMIPS_OpEq64F(v, config)
+ return rewriteValueMIPS_OpEq64F(v)
case OpEq8:
- return rewriteValueMIPS_OpEq8(v, config)
+ return rewriteValueMIPS_OpEq8(v)
case OpEqB:
- return rewriteValueMIPS_OpEqB(v, config)
+ return rewriteValueMIPS_OpEqB(v)
case OpEqPtr:
- return rewriteValueMIPS_OpEqPtr(v, config)
+ return rewriteValueMIPS_OpEqPtr(v)
case OpGeq16:
- return rewriteValueMIPS_OpGeq16(v, config)
+ return rewriteValueMIPS_OpGeq16(v)
case OpGeq16U:
- return rewriteValueMIPS_OpGeq16U(v, config)
+ return rewriteValueMIPS_OpGeq16U(v)
case OpGeq32:
- return rewriteValueMIPS_OpGeq32(v, config)
+ return rewriteValueMIPS_OpGeq32(v)
case OpGeq32F:
- return rewriteValueMIPS_OpGeq32F(v, config)
+ return rewriteValueMIPS_OpGeq32F(v)
case OpGeq32U:
- return rewriteValueMIPS_OpGeq32U(v, config)
+ return rewriteValueMIPS_OpGeq32U(v)
case OpGeq64F:
- return rewriteValueMIPS_OpGeq64F(v, config)
+ return rewriteValueMIPS_OpGeq64F(v)
case OpGeq8:
- return rewriteValueMIPS_OpGeq8(v, config)
+ return rewriteValueMIPS_OpGeq8(v)
case OpGeq8U:
- return rewriteValueMIPS_OpGeq8U(v, config)
+ return rewriteValueMIPS_OpGeq8U(v)
case OpGetClosurePtr:
- return rewriteValueMIPS_OpGetClosurePtr(v, config)
+ return rewriteValueMIPS_OpGetClosurePtr(v)
case OpGreater16:
- return rewriteValueMIPS_OpGreater16(v, config)
+ return rewriteValueMIPS_OpGreater16(v)
case OpGreater16U:
- return rewriteValueMIPS_OpGreater16U(v, config)
+ return rewriteValueMIPS_OpGreater16U(v)
case OpGreater32:
- return rewriteValueMIPS_OpGreater32(v, config)
+ return rewriteValueMIPS_OpGreater32(v)
case OpGreater32F:
- return rewriteValueMIPS_OpGreater32F(v, config)
+ return rewriteValueMIPS_OpGreater32F(v)
case OpGreater32U:
- return rewriteValueMIPS_OpGreater32U(v, config)
+ return rewriteValueMIPS_OpGreater32U(v)
case OpGreater64F:
- return rewriteValueMIPS_OpGreater64F(v, config)
+ return rewriteValueMIPS_OpGreater64F(v)
case OpGreater8:
- return rewriteValueMIPS_OpGreater8(v, config)
+ return rewriteValueMIPS_OpGreater8(v)
case OpGreater8U:
- return rewriteValueMIPS_OpGreater8U(v, config)
+ return rewriteValueMIPS_OpGreater8U(v)
case OpHmul32:
- return rewriteValueMIPS_OpHmul32(v, config)
+ return rewriteValueMIPS_OpHmul32(v)
case OpHmul32u:
- return rewriteValueMIPS_OpHmul32u(v, config)
+ return rewriteValueMIPS_OpHmul32u(v)
case OpInterCall:
- return rewriteValueMIPS_OpInterCall(v, config)
+ return rewriteValueMIPS_OpInterCall(v)
case OpIsInBounds:
- return rewriteValueMIPS_OpIsInBounds(v, config)
+ return rewriteValueMIPS_OpIsInBounds(v)
case OpIsNonNil:
- return rewriteValueMIPS_OpIsNonNil(v, config)
+ return rewriteValueMIPS_OpIsNonNil(v)
case OpIsSliceInBounds:
- return rewriteValueMIPS_OpIsSliceInBounds(v, config)
+ return rewriteValueMIPS_OpIsSliceInBounds(v)
case OpLeq16:
- return rewriteValueMIPS_OpLeq16(v, config)
+ return rewriteValueMIPS_OpLeq16(v)
case OpLeq16U:
- return rewriteValueMIPS_OpLeq16U(v, config)
+ return rewriteValueMIPS_OpLeq16U(v)
case OpLeq32:
- return rewriteValueMIPS_OpLeq32(v, config)
+ return rewriteValueMIPS_OpLeq32(v)
case OpLeq32F:
- return rewriteValueMIPS_OpLeq32F(v, config)
+ return rewriteValueMIPS_OpLeq32F(v)
case OpLeq32U:
- return rewriteValueMIPS_OpLeq32U(v, config)
+ return rewriteValueMIPS_OpLeq32U(v)
case OpLeq64F:
- return rewriteValueMIPS_OpLeq64F(v, config)
+ return rewriteValueMIPS_OpLeq64F(v)
case OpLeq8:
- return rewriteValueMIPS_OpLeq8(v, config)
+ return rewriteValueMIPS_OpLeq8(v)
case OpLeq8U:
- return rewriteValueMIPS_OpLeq8U(v, config)
+ return rewriteValueMIPS_OpLeq8U(v)
case OpLess16:
- return rewriteValueMIPS_OpLess16(v, config)
+ return rewriteValueMIPS_OpLess16(v)
case OpLess16U:
- return rewriteValueMIPS_OpLess16U(v, config)
+ return rewriteValueMIPS_OpLess16U(v)
case OpLess32:
- return rewriteValueMIPS_OpLess32(v, config)
+ return rewriteValueMIPS_OpLess32(v)
case OpLess32F:
- return rewriteValueMIPS_OpLess32F(v, config)
+ return rewriteValueMIPS_OpLess32F(v)
case OpLess32U:
- return rewriteValueMIPS_OpLess32U(v, config)
+ return rewriteValueMIPS_OpLess32U(v)
case OpLess64F:
- return rewriteValueMIPS_OpLess64F(v, config)
+ return rewriteValueMIPS_OpLess64F(v)
case OpLess8:
- return rewriteValueMIPS_OpLess8(v, config)
+ return rewriteValueMIPS_OpLess8(v)
case OpLess8U:
- return rewriteValueMIPS_OpLess8U(v, config)
+ return rewriteValueMIPS_OpLess8U(v)
case OpLoad:
- return rewriteValueMIPS_OpLoad(v, config)
+ return rewriteValueMIPS_OpLoad(v)
case OpLsh16x16:
- return rewriteValueMIPS_OpLsh16x16(v, config)
+ return rewriteValueMIPS_OpLsh16x16(v)
case OpLsh16x32:
- return rewriteValueMIPS_OpLsh16x32(v, config)
+ return rewriteValueMIPS_OpLsh16x32(v)
case OpLsh16x64:
- return rewriteValueMIPS_OpLsh16x64(v, config)
+ return rewriteValueMIPS_OpLsh16x64(v)
case OpLsh16x8:
- return rewriteValueMIPS_OpLsh16x8(v, config)
+ return rewriteValueMIPS_OpLsh16x8(v)
case OpLsh32x16:
- return rewriteValueMIPS_OpLsh32x16(v, config)
+ return rewriteValueMIPS_OpLsh32x16(v)
case OpLsh32x32:
- return rewriteValueMIPS_OpLsh32x32(v, config)
+ return rewriteValueMIPS_OpLsh32x32(v)
case OpLsh32x64:
- return rewriteValueMIPS_OpLsh32x64(v, config)
+ return rewriteValueMIPS_OpLsh32x64(v)
case OpLsh32x8:
- return rewriteValueMIPS_OpLsh32x8(v, config)
+ return rewriteValueMIPS_OpLsh32x8(v)
case OpLsh8x16:
- return rewriteValueMIPS_OpLsh8x16(v, config)
+ return rewriteValueMIPS_OpLsh8x16(v)
case OpLsh8x32:
- return rewriteValueMIPS_OpLsh8x32(v, config)
+ return rewriteValueMIPS_OpLsh8x32(v)
case OpLsh8x64:
- return rewriteValueMIPS_OpLsh8x64(v, config)
+ return rewriteValueMIPS_OpLsh8x64(v)
case OpLsh8x8:
- return rewriteValueMIPS_OpLsh8x8(v, config)
+ return rewriteValueMIPS_OpLsh8x8(v)
case OpMIPSADD:
- return rewriteValueMIPS_OpMIPSADD(v, config)
+ return rewriteValueMIPS_OpMIPSADD(v)
case OpMIPSADDconst:
- return rewriteValueMIPS_OpMIPSADDconst(v, config)
+ return rewriteValueMIPS_OpMIPSADDconst(v)
case OpMIPSAND:
- return rewriteValueMIPS_OpMIPSAND(v, config)
+ return rewriteValueMIPS_OpMIPSAND(v)
case OpMIPSANDconst:
- return rewriteValueMIPS_OpMIPSANDconst(v, config)
+ return rewriteValueMIPS_OpMIPSANDconst(v)
case OpMIPSCMOVZ:
- return rewriteValueMIPS_OpMIPSCMOVZ(v, config)
+ return rewriteValueMIPS_OpMIPSCMOVZ(v)
case OpMIPSCMOVZzero:
- return rewriteValueMIPS_OpMIPSCMOVZzero(v, config)
+ return rewriteValueMIPS_OpMIPSCMOVZzero(v)
case OpMIPSLoweredAtomicAdd:
- return rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v, config)
+ return rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v)
case OpMIPSLoweredAtomicStore:
- return rewriteValueMIPS_OpMIPSLoweredAtomicStore(v, config)
+ return rewriteValueMIPS_OpMIPSLoweredAtomicStore(v)
case OpMIPSMOVBUload:
- return rewriteValueMIPS_OpMIPSMOVBUload(v, config)
+ return rewriteValueMIPS_OpMIPSMOVBUload(v)
case OpMIPSMOVBUreg:
- return rewriteValueMIPS_OpMIPSMOVBUreg(v, config)
+ return rewriteValueMIPS_OpMIPSMOVBUreg(v)
case OpMIPSMOVBload:
- return rewriteValueMIPS_OpMIPSMOVBload(v, config)
+ return rewriteValueMIPS_OpMIPSMOVBload(v)
case OpMIPSMOVBreg:
- return rewriteValueMIPS_OpMIPSMOVBreg(v, config)
+ return rewriteValueMIPS_OpMIPSMOVBreg(v)
case OpMIPSMOVBstore:
- return rewriteValueMIPS_OpMIPSMOVBstore(v, config)
+ return rewriteValueMIPS_OpMIPSMOVBstore(v)
case OpMIPSMOVBstorezero:
- return rewriteValueMIPS_OpMIPSMOVBstorezero(v, config)
+ return rewriteValueMIPS_OpMIPSMOVBstorezero(v)
case OpMIPSMOVDload:
- return rewriteValueMIPS_OpMIPSMOVDload(v, config)
+ return rewriteValueMIPS_OpMIPSMOVDload(v)
case OpMIPSMOVDstore:
- return rewriteValueMIPS_OpMIPSMOVDstore(v, config)
+ return rewriteValueMIPS_OpMIPSMOVDstore(v)
case OpMIPSMOVFload:
- return rewriteValueMIPS_OpMIPSMOVFload(v, config)
+ return rewriteValueMIPS_OpMIPSMOVFload(v)
case OpMIPSMOVFstore:
- return rewriteValueMIPS_OpMIPSMOVFstore(v, config)
+ return rewriteValueMIPS_OpMIPSMOVFstore(v)
case OpMIPSMOVHUload:
- return rewriteValueMIPS_OpMIPSMOVHUload(v, config)
+ return rewriteValueMIPS_OpMIPSMOVHUload(v)
case OpMIPSMOVHUreg:
- return rewriteValueMIPS_OpMIPSMOVHUreg(v, config)
+ return rewriteValueMIPS_OpMIPSMOVHUreg(v)
case OpMIPSMOVHload:
- return rewriteValueMIPS_OpMIPSMOVHload(v, config)
+ return rewriteValueMIPS_OpMIPSMOVHload(v)
case OpMIPSMOVHreg:
- return rewriteValueMIPS_OpMIPSMOVHreg(v, config)
+ return rewriteValueMIPS_OpMIPSMOVHreg(v)
case OpMIPSMOVHstore:
- return rewriteValueMIPS_OpMIPSMOVHstore(v, config)
+ return rewriteValueMIPS_OpMIPSMOVHstore(v)
case OpMIPSMOVHstorezero:
- return rewriteValueMIPS_OpMIPSMOVHstorezero(v, config)
+ return rewriteValueMIPS_OpMIPSMOVHstorezero(v)
case OpMIPSMOVWload:
- return rewriteValueMIPS_OpMIPSMOVWload(v, config)
+ return rewriteValueMIPS_OpMIPSMOVWload(v)
case OpMIPSMOVWreg:
- return rewriteValueMIPS_OpMIPSMOVWreg(v, config)
+ return rewriteValueMIPS_OpMIPSMOVWreg(v)
case OpMIPSMOVWstore:
- return rewriteValueMIPS_OpMIPSMOVWstore(v, config)
+ return rewriteValueMIPS_OpMIPSMOVWstore(v)
case OpMIPSMOVWstorezero:
- return rewriteValueMIPS_OpMIPSMOVWstorezero(v, config)
+ return rewriteValueMIPS_OpMIPSMOVWstorezero(v)
case OpMIPSMUL:
- return rewriteValueMIPS_OpMIPSMUL(v, config)
+ return rewriteValueMIPS_OpMIPSMUL(v)
case OpMIPSNEG:
- return rewriteValueMIPS_OpMIPSNEG(v, config)
+ return rewriteValueMIPS_OpMIPSNEG(v)
case OpMIPSNOR:
- return rewriteValueMIPS_OpMIPSNOR(v, config)
+ return rewriteValueMIPS_OpMIPSNOR(v)
case OpMIPSNORconst:
- return rewriteValueMIPS_OpMIPSNORconst(v, config)
+ return rewriteValueMIPS_OpMIPSNORconst(v)
case OpMIPSOR:
- return rewriteValueMIPS_OpMIPSOR(v, config)
+ return rewriteValueMIPS_OpMIPSOR(v)
case OpMIPSORconst:
- return rewriteValueMIPS_OpMIPSORconst(v, config)
+ return rewriteValueMIPS_OpMIPSORconst(v)
case OpMIPSSGT:
- return rewriteValueMIPS_OpMIPSSGT(v, config)
+ return rewriteValueMIPS_OpMIPSSGT(v)
case OpMIPSSGTU:
- return rewriteValueMIPS_OpMIPSSGTU(v, config)
+ return rewriteValueMIPS_OpMIPSSGTU(v)
case OpMIPSSGTUconst:
- return rewriteValueMIPS_OpMIPSSGTUconst(v, config)
+ return rewriteValueMIPS_OpMIPSSGTUconst(v)
case OpMIPSSGTUzero:
- return rewriteValueMIPS_OpMIPSSGTUzero(v, config)
+ return rewriteValueMIPS_OpMIPSSGTUzero(v)
case OpMIPSSGTconst:
- return rewriteValueMIPS_OpMIPSSGTconst(v, config)
+ return rewriteValueMIPS_OpMIPSSGTconst(v)
case OpMIPSSGTzero:
- return rewriteValueMIPS_OpMIPSSGTzero(v, config)
+ return rewriteValueMIPS_OpMIPSSGTzero(v)
case OpMIPSSLL:
- return rewriteValueMIPS_OpMIPSSLL(v, config)
+ return rewriteValueMIPS_OpMIPSSLL(v)
case OpMIPSSLLconst:
- return rewriteValueMIPS_OpMIPSSLLconst(v, config)
+ return rewriteValueMIPS_OpMIPSSLLconst(v)
case OpMIPSSRA:
- return rewriteValueMIPS_OpMIPSSRA(v, config)
+ return rewriteValueMIPS_OpMIPSSRA(v)
case OpMIPSSRAconst:
- return rewriteValueMIPS_OpMIPSSRAconst(v, config)
+ return rewriteValueMIPS_OpMIPSSRAconst(v)
case OpMIPSSRL:
- return rewriteValueMIPS_OpMIPSSRL(v, config)
+ return rewriteValueMIPS_OpMIPSSRL(v)
case OpMIPSSRLconst:
- return rewriteValueMIPS_OpMIPSSRLconst(v, config)
+ return rewriteValueMIPS_OpMIPSSRLconst(v)
case OpMIPSSUB:
- return rewriteValueMIPS_OpMIPSSUB(v, config)
+ return rewriteValueMIPS_OpMIPSSUB(v)
case OpMIPSSUBconst:
- return rewriteValueMIPS_OpMIPSSUBconst(v, config)
+ return rewriteValueMIPS_OpMIPSSUBconst(v)
case OpMIPSXOR:
- return rewriteValueMIPS_OpMIPSXOR(v, config)
+ return rewriteValueMIPS_OpMIPSXOR(v)
case OpMIPSXORconst:
- return rewriteValueMIPS_OpMIPSXORconst(v, config)
+ return rewriteValueMIPS_OpMIPSXORconst(v)
case OpMod16:
- return rewriteValueMIPS_OpMod16(v, config)
+ return rewriteValueMIPS_OpMod16(v)
case OpMod16u:
- return rewriteValueMIPS_OpMod16u(v, config)
+ return rewriteValueMIPS_OpMod16u(v)
case OpMod32:
- return rewriteValueMIPS_OpMod32(v, config)
+ return rewriteValueMIPS_OpMod32(v)
case OpMod32u:
- return rewriteValueMIPS_OpMod32u(v, config)
+ return rewriteValueMIPS_OpMod32u(v)
case OpMod8:
- return rewriteValueMIPS_OpMod8(v, config)
+ return rewriteValueMIPS_OpMod8(v)
case OpMod8u:
- return rewriteValueMIPS_OpMod8u(v, config)
+ return rewriteValueMIPS_OpMod8u(v)
case OpMove:
- return rewriteValueMIPS_OpMove(v, config)
+ return rewriteValueMIPS_OpMove(v)
case OpMul16:
- return rewriteValueMIPS_OpMul16(v, config)
+ return rewriteValueMIPS_OpMul16(v)
case OpMul32:
- return rewriteValueMIPS_OpMul32(v, config)
+ return rewriteValueMIPS_OpMul32(v)
case OpMul32F:
- return rewriteValueMIPS_OpMul32F(v, config)
+ return rewriteValueMIPS_OpMul32F(v)
case OpMul32uhilo:
- return rewriteValueMIPS_OpMul32uhilo(v, config)
+ return rewriteValueMIPS_OpMul32uhilo(v)
case OpMul64F:
- return rewriteValueMIPS_OpMul64F(v, config)
+ return rewriteValueMIPS_OpMul64F(v)
case OpMul8:
- return rewriteValueMIPS_OpMul8(v, config)
+ return rewriteValueMIPS_OpMul8(v)
case OpNeg16:
- return rewriteValueMIPS_OpNeg16(v, config)
+ return rewriteValueMIPS_OpNeg16(v)
case OpNeg32:
- return rewriteValueMIPS_OpNeg32(v, config)
+ return rewriteValueMIPS_OpNeg32(v)
case OpNeg32F:
- return rewriteValueMIPS_OpNeg32F(v, config)
+ return rewriteValueMIPS_OpNeg32F(v)
case OpNeg64F:
- return rewriteValueMIPS_OpNeg64F(v, config)
+ return rewriteValueMIPS_OpNeg64F(v)
case OpNeg8:
- return rewriteValueMIPS_OpNeg8(v, config)
+ return rewriteValueMIPS_OpNeg8(v)
case OpNeq16:
- return rewriteValueMIPS_OpNeq16(v, config)
+ return rewriteValueMIPS_OpNeq16(v)
case OpNeq32:
- return rewriteValueMIPS_OpNeq32(v, config)
+ return rewriteValueMIPS_OpNeq32(v)
case OpNeq32F:
- return rewriteValueMIPS_OpNeq32F(v, config)
+ return rewriteValueMIPS_OpNeq32F(v)
case OpNeq64F:
- return rewriteValueMIPS_OpNeq64F(v, config)
+ return rewriteValueMIPS_OpNeq64F(v)
case OpNeq8:
- return rewriteValueMIPS_OpNeq8(v, config)
+ return rewriteValueMIPS_OpNeq8(v)
case OpNeqB:
- return rewriteValueMIPS_OpNeqB(v, config)
+ return rewriteValueMIPS_OpNeqB(v)
case OpNeqPtr:
- return rewriteValueMIPS_OpNeqPtr(v, config)
+ return rewriteValueMIPS_OpNeqPtr(v)
case OpNilCheck:
- return rewriteValueMIPS_OpNilCheck(v, config)
+ return rewriteValueMIPS_OpNilCheck(v)
case OpNot:
- return rewriteValueMIPS_OpNot(v, config)
+ return rewriteValueMIPS_OpNot(v)
case OpOffPtr:
- return rewriteValueMIPS_OpOffPtr(v, config)
+ return rewriteValueMIPS_OpOffPtr(v)
case OpOr16:
- return rewriteValueMIPS_OpOr16(v, config)
+ return rewriteValueMIPS_OpOr16(v)
case OpOr32:
- return rewriteValueMIPS_OpOr32(v, config)
+ return rewriteValueMIPS_OpOr32(v)
case OpOr8:
- return rewriteValueMIPS_OpOr8(v, config)
+ return rewriteValueMIPS_OpOr8(v)
case OpOrB:
- return rewriteValueMIPS_OpOrB(v, config)
+ return rewriteValueMIPS_OpOrB(v)
case OpRound32F:
- return rewriteValueMIPS_OpRound32F(v, config)
+ return rewriteValueMIPS_OpRound32F(v)
case OpRound64F:
- return rewriteValueMIPS_OpRound64F(v, config)
+ return rewriteValueMIPS_OpRound64F(v)
case OpRsh16Ux16:
- return rewriteValueMIPS_OpRsh16Ux16(v, config)
+ return rewriteValueMIPS_OpRsh16Ux16(v)
case OpRsh16Ux32:
- return rewriteValueMIPS_OpRsh16Ux32(v, config)
+ return rewriteValueMIPS_OpRsh16Ux32(v)
case OpRsh16Ux64:
- return rewriteValueMIPS_OpRsh16Ux64(v, config)
+ return rewriteValueMIPS_OpRsh16Ux64(v)
case OpRsh16Ux8:
- return rewriteValueMIPS_OpRsh16Ux8(v, config)
+ return rewriteValueMIPS_OpRsh16Ux8(v)
case OpRsh16x16:
- return rewriteValueMIPS_OpRsh16x16(v, config)
+ return rewriteValueMIPS_OpRsh16x16(v)
case OpRsh16x32:
- return rewriteValueMIPS_OpRsh16x32(v, config)
+ return rewriteValueMIPS_OpRsh16x32(v)
case OpRsh16x64:
- return rewriteValueMIPS_OpRsh16x64(v, config)
+ return rewriteValueMIPS_OpRsh16x64(v)
case OpRsh16x8:
- return rewriteValueMIPS_OpRsh16x8(v, config)
+ return rewriteValueMIPS_OpRsh16x8(v)
case OpRsh32Ux16:
- return rewriteValueMIPS_OpRsh32Ux16(v, config)
+ return rewriteValueMIPS_OpRsh32Ux16(v)
case OpRsh32Ux32:
- return rewriteValueMIPS_OpRsh32Ux32(v, config)
+ return rewriteValueMIPS_OpRsh32Ux32(v)
case OpRsh32Ux64:
- return rewriteValueMIPS_OpRsh32Ux64(v, config)
+ return rewriteValueMIPS_OpRsh32Ux64(v)
case OpRsh32Ux8:
- return rewriteValueMIPS_OpRsh32Ux8(v, config)
+ return rewriteValueMIPS_OpRsh32Ux8(v)
case OpRsh32x16:
- return rewriteValueMIPS_OpRsh32x16(v, config)
+ return rewriteValueMIPS_OpRsh32x16(v)
case OpRsh32x32:
- return rewriteValueMIPS_OpRsh32x32(v, config)
+ return rewriteValueMIPS_OpRsh32x32(v)
case OpRsh32x64:
- return rewriteValueMIPS_OpRsh32x64(v, config)
+ return rewriteValueMIPS_OpRsh32x64(v)
case OpRsh32x8:
- return rewriteValueMIPS_OpRsh32x8(v, config)
+ return rewriteValueMIPS_OpRsh32x8(v)
case OpRsh8Ux16:
- return rewriteValueMIPS_OpRsh8Ux16(v, config)
+ return rewriteValueMIPS_OpRsh8Ux16(v)
case OpRsh8Ux32:
- return rewriteValueMIPS_OpRsh8Ux32(v, config)
+ return rewriteValueMIPS_OpRsh8Ux32(v)
case OpRsh8Ux64:
- return rewriteValueMIPS_OpRsh8Ux64(v, config)
+ return rewriteValueMIPS_OpRsh8Ux64(v)
case OpRsh8Ux8:
- return rewriteValueMIPS_OpRsh8Ux8(v, config)
+ return rewriteValueMIPS_OpRsh8Ux8(v)
case OpRsh8x16:
- return rewriteValueMIPS_OpRsh8x16(v, config)
+ return rewriteValueMIPS_OpRsh8x16(v)
case OpRsh8x32:
- return rewriteValueMIPS_OpRsh8x32(v, config)
+ return rewriteValueMIPS_OpRsh8x32(v)
case OpRsh8x64:
- return rewriteValueMIPS_OpRsh8x64(v, config)
+ return rewriteValueMIPS_OpRsh8x64(v)
case OpRsh8x8:
- return rewriteValueMIPS_OpRsh8x8(v, config)
+ return rewriteValueMIPS_OpRsh8x8(v)
case OpSelect0:
- return rewriteValueMIPS_OpSelect0(v, config)
+ return rewriteValueMIPS_OpSelect0(v)
case OpSelect1:
- return rewriteValueMIPS_OpSelect1(v, config)
+ return rewriteValueMIPS_OpSelect1(v)
case OpSignExt16to32:
- return rewriteValueMIPS_OpSignExt16to32(v, config)
+ return rewriteValueMIPS_OpSignExt16to32(v)
case OpSignExt8to16:
- return rewriteValueMIPS_OpSignExt8to16(v, config)
+ return rewriteValueMIPS_OpSignExt8to16(v)
case OpSignExt8to32:
- return rewriteValueMIPS_OpSignExt8to32(v, config)
+ return rewriteValueMIPS_OpSignExt8to32(v)
case OpSignmask:
- return rewriteValueMIPS_OpSignmask(v, config)
+ return rewriteValueMIPS_OpSignmask(v)
case OpSlicemask:
- return rewriteValueMIPS_OpSlicemask(v, config)
+ return rewriteValueMIPS_OpSlicemask(v)
case OpSqrt:
- return rewriteValueMIPS_OpSqrt(v, config)
+ return rewriteValueMIPS_OpSqrt(v)
case OpStaticCall:
- return rewriteValueMIPS_OpStaticCall(v, config)
+ return rewriteValueMIPS_OpStaticCall(v)
case OpStore:
- return rewriteValueMIPS_OpStore(v, config)
+ return rewriteValueMIPS_OpStore(v)
case OpSub16:
- return rewriteValueMIPS_OpSub16(v, config)
+ return rewriteValueMIPS_OpSub16(v)
case OpSub32:
- return rewriteValueMIPS_OpSub32(v, config)
+ return rewriteValueMIPS_OpSub32(v)
case OpSub32F:
- return rewriteValueMIPS_OpSub32F(v, config)
+ return rewriteValueMIPS_OpSub32F(v)
case OpSub32withcarry:
- return rewriteValueMIPS_OpSub32withcarry(v, config)
+ return rewriteValueMIPS_OpSub32withcarry(v)
case OpSub64F:
- return rewriteValueMIPS_OpSub64F(v, config)
+ return rewriteValueMIPS_OpSub64F(v)
case OpSub8:
- return rewriteValueMIPS_OpSub8(v, config)
+ return rewriteValueMIPS_OpSub8(v)
case OpSubPtr:
- return rewriteValueMIPS_OpSubPtr(v, config)
+ return rewriteValueMIPS_OpSubPtr(v)
case OpTrunc16to8:
- return rewriteValueMIPS_OpTrunc16to8(v, config)
+ return rewriteValueMIPS_OpTrunc16to8(v)
case OpTrunc32to16:
- return rewriteValueMIPS_OpTrunc32to16(v, config)
+ return rewriteValueMIPS_OpTrunc32to16(v)
case OpTrunc32to8:
- return rewriteValueMIPS_OpTrunc32to8(v, config)
+ return rewriteValueMIPS_OpTrunc32to8(v)
case OpXor16:
- return rewriteValueMIPS_OpXor16(v, config)
+ return rewriteValueMIPS_OpXor16(v)
case OpXor32:
- return rewriteValueMIPS_OpXor32(v, config)
+ return rewriteValueMIPS_OpXor32(v)
case OpXor8:
- return rewriteValueMIPS_OpXor8(v, config)
+ return rewriteValueMIPS_OpXor8(v)
case OpZero:
- return rewriteValueMIPS_OpZero(v, config)
+ return rewriteValueMIPS_OpZero(v)
case OpZeroExt16to32:
- return rewriteValueMIPS_OpZeroExt16to32(v, config)
+ return rewriteValueMIPS_OpZeroExt16to32(v)
case OpZeroExt8to16:
- return rewriteValueMIPS_OpZeroExt8to16(v, config)
+ return rewriteValueMIPS_OpZeroExt8to16(v)
case OpZeroExt8to32:
- return rewriteValueMIPS_OpZeroExt8to32(v, config)
+ return rewriteValueMIPS_OpZeroExt8to32(v)
case OpZeromask:
- return rewriteValueMIPS_OpZeromask(v, config)
+ return rewriteValueMIPS_OpZeromask(v)
}
return false
}
-func rewriteValueMIPS_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAdd16(v *Value) bool {
// match: (Add16 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueMIPS_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAdd32(v *Value) bool {
// match: (Add32 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueMIPS_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAdd32F(v *Value) bool {
// match: (Add32F x y)
// cond:
// result: (ADDF x y)
return true
}
}
-func rewriteValueMIPS_OpAdd32withcarry(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpAdd32withcarry(v *Value) bool {
b := v.Block
_ = b
// match: (Add32withcarry <t> x y c)
return true
}
}
-func rewriteValueMIPS_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAdd64F(v *Value) bool {
// match: (Add64F x y)
// cond:
// result: (ADDD x y)
return true
}
}
-func rewriteValueMIPS_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAdd8(v *Value) bool {
// match: (Add8 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueMIPS_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAddPtr(v *Value) bool {
// match: (AddPtr x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueMIPS_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAddr(v *Value) bool {
// match: (Addr {sym} base)
// cond:
// result: (MOVWaddr {sym} base)
return true
}
}
-func rewriteValueMIPS_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAnd16(v *Value) bool {
// match: (And16 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueMIPS_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAnd32(v *Value) bool {
// match: (And32 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueMIPS_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAnd8(v *Value) bool {
// match: (And8 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueMIPS_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAndB(v *Value) bool {
// match: (AndB x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueMIPS_OpAtomicAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAtomicAdd32(v *Value) bool {
// match: (AtomicAdd32 ptr val mem)
// cond:
// result: (LoweredAtomicAdd ptr val mem)
return true
}
}
-func rewriteValueMIPS_OpAtomicAnd8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (AtomicAnd8 ptr val mem)
// cond: !config.BigEndian
- // result: (LoweredAtomicAnd (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) (OR <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val) (SLLconst <config.fe.TypeUInt32()> [3] (ANDconst <config.fe.TypeUInt32()> [3] ptr))) (NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3] (ANDconst <config.fe.TypeUInt32()> [3] (XORconst <config.fe.TypeUInt32()> [3] ptr)))))) mem)
+ // result: (LoweredAtomicAnd (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) (OR <fe.TypeUInt32()> (SLL <fe.TypeUInt32()> (ZeroExt8to32 val) (SLLconst <fe.TypeUInt32()> [3] (ANDconst <fe.TypeUInt32()> [3] ptr))) (NORconst [0] <fe.TypeUInt32()> (SLL <fe.TypeUInt32()> (MOVWconst [0xff]) (SLLconst <fe.TypeUInt32()> [3] (ANDconst <fe.TypeUInt32()> [3] (XORconst <fe.TypeUInt32()> [3] ptr)))))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
break
}
v.reset(OpMIPSLoweredAtomicAnd)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, config.fe.TypeUInt32().PtrTo())
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, fe.TypeUInt32().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = ^3
v0.AddArg(v1)
v0.AddArg(ptr)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSOR, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpMIPSSLL, config.fe.TypeUInt32())
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSOR, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpMIPSSLL, fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v4.AddArg(val)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, fe.TypeUInt32())
v5.AuxInt = 3
- v6 := b.NewValue0(v.Pos, OpMIPSANDconst, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpMIPSANDconst, fe.TypeUInt32())
v6.AuxInt = 3
v6.AddArg(ptr)
v5.AddArg(v6)
v3.AddArg(v5)
v2.AddArg(v3)
- v7 := b.NewValue0(v.Pos, OpMIPSNORconst, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpMIPSNORconst, fe.TypeUInt32())
v7.AuxInt = 0
- v8 := b.NewValue0(v.Pos, OpMIPSSLL, config.fe.TypeUInt32())
- v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v8 := b.NewValue0(v.Pos, OpMIPSSLL, fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v9.AuxInt = 0xff
v8.AddArg(v9)
- v10 := b.NewValue0(v.Pos, OpMIPSSLLconst, config.fe.TypeUInt32())
+ v10 := b.NewValue0(v.Pos, OpMIPSSLLconst, fe.TypeUInt32())
v10.AuxInt = 3
- v11 := b.NewValue0(v.Pos, OpMIPSANDconst, config.fe.TypeUInt32())
+ v11 := b.NewValue0(v.Pos, OpMIPSANDconst, fe.TypeUInt32())
v11.AuxInt = 3
- v12 := b.NewValue0(v.Pos, OpMIPSXORconst, config.fe.TypeUInt32())
+ v12 := b.NewValue0(v.Pos, OpMIPSXORconst, fe.TypeUInt32())
v12.AuxInt = 3
v12.AddArg(ptr)
v11.AddArg(v12)
}
// match: (AtomicAnd8 ptr val mem)
// cond: config.BigEndian
- // result: (LoweredAtomicAnd (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) (OR <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val) (SLLconst <config.fe.TypeUInt32()> [3] (ANDconst <config.fe.TypeUInt32()> [3] (XORconst <config.fe.TypeUInt32()> [3] ptr)))) (NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3] (ANDconst <config.fe.TypeUInt32()> [3] (XORconst <config.fe.TypeUInt32()> [3] ptr)))))) mem)
+ // result: (LoweredAtomicAnd (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) (OR <fe.TypeUInt32()> (SLL <fe.TypeUInt32()> (ZeroExt8to32 val) (SLLconst <fe.TypeUInt32()> [3] (ANDconst <fe.TypeUInt32()> [3] (XORconst <fe.TypeUInt32()> [3] ptr)))) (NORconst [0] <fe.TypeUInt32()> (SLL <fe.TypeUInt32()> (MOVWconst [0xff]) (SLLconst <fe.TypeUInt32()> [3] (ANDconst <fe.TypeUInt32()> [3] (XORconst <fe.TypeUInt32()> [3] ptr)))))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
break
}
v.reset(OpMIPSLoweredAtomicAnd)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, config.fe.TypeUInt32().PtrTo())
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, fe.TypeUInt32().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = ^3
v0.AddArg(v1)
v0.AddArg(ptr)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSOR, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpMIPSSLL, config.fe.TypeUInt32())
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSOR, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpMIPSSLL, fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v4.AddArg(val)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, fe.TypeUInt32())
v5.AuxInt = 3
- v6 := b.NewValue0(v.Pos, OpMIPSANDconst, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpMIPSANDconst, fe.TypeUInt32())
v6.AuxInt = 3
- v7 := b.NewValue0(v.Pos, OpMIPSXORconst, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpMIPSXORconst, fe.TypeUInt32())
v7.AuxInt = 3
v7.AddArg(ptr)
v6.AddArg(v7)
v5.AddArg(v6)
v3.AddArg(v5)
v2.AddArg(v3)
- v8 := b.NewValue0(v.Pos, OpMIPSNORconst, config.fe.TypeUInt32())
+ v8 := b.NewValue0(v.Pos, OpMIPSNORconst, fe.TypeUInt32())
v8.AuxInt = 0
- v9 := b.NewValue0(v.Pos, OpMIPSSLL, config.fe.TypeUInt32())
- v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpMIPSSLL, fe.TypeUInt32())
+ v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v10.AuxInt = 0xff
v9.AddArg(v10)
- v11 := b.NewValue0(v.Pos, OpMIPSSLLconst, config.fe.TypeUInt32())
+ v11 := b.NewValue0(v.Pos, OpMIPSSLLconst, fe.TypeUInt32())
v11.AuxInt = 3
- v12 := b.NewValue0(v.Pos, OpMIPSANDconst, config.fe.TypeUInt32())
+ v12 := b.NewValue0(v.Pos, OpMIPSANDconst, fe.TypeUInt32())
v12.AuxInt = 3
- v13 := b.NewValue0(v.Pos, OpMIPSXORconst, config.fe.TypeUInt32())
+ v13 := b.NewValue0(v.Pos, OpMIPSXORconst, fe.TypeUInt32())
v13.AuxInt = 3
v13.AddArg(ptr)
v12.AddArg(v13)
}
return false
}
-func rewriteValueMIPS_OpAtomicCompareAndSwap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAtomicCompareAndSwap32(v *Value) bool {
// match: (AtomicCompareAndSwap32 ptr old new_ mem)
// cond:
// result: (LoweredAtomicCas ptr old new_ mem)
return true
}
}
-func rewriteValueMIPS_OpAtomicExchange32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAtomicExchange32(v *Value) bool {
// match: (AtomicExchange32 ptr val mem)
// cond:
// result: (LoweredAtomicExchange ptr val mem)
return true
}
}
-func rewriteValueMIPS_OpAtomicLoad32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAtomicLoad32(v *Value) bool {
// match: (AtomicLoad32 ptr mem)
// cond:
// result: (LoweredAtomicLoad ptr mem)
return true
}
}
-func rewriteValueMIPS_OpAtomicLoadPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAtomicLoadPtr(v *Value) bool {
// match: (AtomicLoadPtr ptr mem)
// cond:
// result: (LoweredAtomicLoad ptr mem)
return true
}
}
-func rewriteValueMIPS_OpAtomicOr8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpAtomicOr8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (AtomicOr8 ptr val mem)
// cond: !config.BigEndian
- // result: (LoweredAtomicOr (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val) (SLLconst <config.fe.TypeUInt32()> [3] (ANDconst <config.fe.TypeUInt32()> [3] ptr))) mem)
+ // result: (LoweredAtomicOr (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) (SLL <fe.TypeUInt32()> (ZeroExt8to32 val) (SLLconst <fe.TypeUInt32()> [3] (ANDconst <fe.TypeUInt32()> [3] ptr))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
break
}
v.reset(OpMIPSLoweredAtomicOr)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, config.fe.TypeUInt32().PtrTo())
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, fe.TypeUInt32().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = ^3
v0.AddArg(v1)
v0.AddArg(ptr)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSSLL, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSSLL, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v3.AddArg(val)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, fe.TypeUInt32())
v4.AuxInt = 3
- v5 := b.NewValue0(v.Pos, OpMIPSANDconst, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpMIPSANDconst, fe.TypeUInt32())
v5.AuxInt = 3
v5.AddArg(ptr)
v4.AddArg(v5)
}
// match: (AtomicOr8 ptr val mem)
// cond: config.BigEndian
- // result: (LoweredAtomicOr (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val) (SLLconst <config.fe.TypeUInt32()> [3] (ANDconst <config.fe.TypeUInt32()> [3] (XORconst <config.fe.TypeUInt32()> [3] ptr)))) mem)
+ // result: (LoweredAtomicOr (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) (SLL <fe.TypeUInt32()> (ZeroExt8to32 val) (SLLconst <fe.TypeUInt32()> [3] (ANDconst <fe.TypeUInt32()> [3] (XORconst <fe.TypeUInt32()> [3] ptr)))) mem)
for {
ptr := v.Args[0]
val := v.Args[1]
break
}
v.reset(OpMIPSLoweredAtomicOr)
- v0 := b.NewValue0(v.Pos, OpMIPSAND, config.fe.TypeUInt32().PtrTo())
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, fe.TypeUInt32().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = ^3
v0.AddArg(v1)
v0.AddArg(ptr)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSSLL, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSSLL, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v3.AddArg(val)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, fe.TypeUInt32())
v4.AuxInt = 3
- v5 := b.NewValue0(v.Pos, OpMIPSANDconst, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpMIPSANDconst, fe.TypeUInt32())
v5.AuxInt = 3
- v6 := b.NewValue0(v.Pos, OpMIPSXORconst, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpMIPSXORconst, fe.TypeUInt32())
v6.AuxInt = 3
v6.AddArg(ptr)
v5.AddArg(v6)
}
return false
}
-func rewriteValueMIPS_OpAtomicStore32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAtomicStore32(v *Value) bool {
// match: (AtomicStore32 ptr val mem)
// cond:
// result: (LoweredAtomicStore ptr val mem)
return true
}
}
-func rewriteValueMIPS_OpAtomicStorePtrNoWB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpAtomicStorePtrNoWB(v *Value) bool {
// match: (AtomicStorePtrNoWB ptr val mem)
// cond:
// result: (LoweredAtomicStore ptr val mem)
return true
}
}
-func rewriteValueMIPS_OpAvg32u(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpAvg32u(v *Value) bool {
b := v.Block
_ = b
// match: (Avg32u <t> x y)
return true
}
}
-func rewriteValueMIPS_OpBitLen32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpBitLen32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (BitLen32 <t> x)
// cond:
// result: (SUB (MOVWconst [32]) (CLZ <t> x))
t := v.Type
x := v.Args[0]
v.reset(OpMIPSSUB)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 32
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
return true
}
}
-func rewriteValueMIPS_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpClosureCall(v *Value) bool {
// match: (ClosureCall [argwid] entry closure mem)
// cond:
// result: (CALLclosure [argwid] entry closure mem)
return true
}
}
-func rewriteValueMIPS_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpCom16(v *Value) bool {
// match: (Com16 x)
// cond:
// result: (NORconst [0] x)
return true
}
}
-func rewriteValueMIPS_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpCom32(v *Value) bool {
// match: (Com32 x)
// cond:
// result: (NORconst [0] x)
return true
}
}
-func rewriteValueMIPS_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpCom8(v *Value) bool {
// match: (Com8 x)
// cond:
// result: (NORconst [0] x)
return true
}
}
-func rewriteValueMIPS_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpConst16(v *Value) bool {
// match: (Const16 [val])
// cond:
// result: (MOVWconst [val])
return true
}
}
-func rewriteValueMIPS_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpConst32(v *Value) bool {
// match: (Const32 [val])
// cond:
// result: (MOVWconst [val])
return true
}
}
-func rewriteValueMIPS_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpConst32F(v *Value) bool {
// match: (Const32F [val])
// cond:
// result: (MOVFconst [val])
return true
}
}
-func rewriteValueMIPS_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpConst64F(v *Value) bool {
// match: (Const64F [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValueMIPS_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpConst8(v *Value) bool {
// match: (Const8 [val])
// cond:
// result: (MOVWconst [val])
return true
}
}
-func rewriteValueMIPS_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpConstBool(v *Value) bool {
// match: (ConstBool [b])
// cond:
// result: (MOVWconst [b])
return true
}
}
-func rewriteValueMIPS_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpConstNil(v *Value) bool {
// match: (ConstNil)
// cond:
// result: (MOVWconst [0])
return true
}
}
-func rewriteValueMIPS_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpConvert(v *Value) bool {
// match: (Convert x mem)
// cond:
// result: (MOVWconvert x mem)
return true
}
}
-func rewriteValueMIPS_OpCtz32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpCtz32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Ctz32 <t> x)
// cond:
// result: (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
t := v.Type
x := v.Args[0]
v.reset(OpMIPSSUB)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 32
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
return true
}
}
-func rewriteValueMIPS_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpCvt32Fto32(v *Value) bool {
// match: (Cvt32Fto32 x)
// cond:
// result: (TRUNCFW x)
return true
}
}
-func rewriteValueMIPS_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpCvt32Fto64F(v *Value) bool {
// match: (Cvt32Fto64F x)
// cond:
// result: (MOVFD x)
return true
}
}
-func rewriteValueMIPS_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpCvt32to32F(v *Value) bool {
// match: (Cvt32to32F x)
// cond:
// result: (MOVWF x)
return true
}
}
-func rewriteValueMIPS_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpCvt32to64F(v *Value) bool {
// match: (Cvt32to64F x)
// cond:
// result: (MOVWD x)
return true
}
}
-func rewriteValueMIPS_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpCvt64Fto32(v *Value) bool {
// match: (Cvt64Fto32 x)
// cond:
// result: (TRUNCDW x)
return true
}
}
-func rewriteValueMIPS_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpCvt64Fto32F(v *Value) bool {
// match: (Cvt64Fto32F x)
// cond:
// result: (MOVDF x)
return true
}
}
-func rewriteValueMIPS_OpDiv16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpDiv16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16 x y)
// cond:
// result: (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(fe.TypeInt32(), fe.TypeInt32()))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpDiv16u(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpDiv16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16u x y)
// cond:
// result: (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpDiv32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpDiv32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div32 x y)
// cond:
// result: (Select1 (DIV x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(fe.TypeInt32(), fe.TypeInt32()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpDiv32F(v *Value) bool {
// match: (Div32F x y)
// cond:
// result: (DIVF x y)
return true
}
}
-func rewriteValueMIPS_OpDiv32u(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpDiv32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div32u x y)
// cond:
// result: (Select1 (DIVU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpDiv64F(v *Value) bool {
// match: (Div64F x y)
// cond:
// result: (DIVD x y)
return true
}
}
-func rewriteValueMIPS_OpDiv8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpDiv8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8 x y)
// cond:
// result: (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(fe.TypeInt32(), fe.TypeInt32()))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpDiv8u(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpDiv8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8u x y)
// cond:
// result: (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpEq16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpEq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq16 x y)
// cond:
// result: (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpMIPSSGTUconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpEq32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpEq32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq32 x y)
// cond:
// result: (SGTUconst [1] (XOR x y))
y := v.Args[1]
v.reset(OpMIPSSGTUconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, fe.TypeUInt32())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpEq32F(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpEq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32F x y)
return true
}
}
-func rewriteValueMIPS_OpEq64F(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpEq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq64F x y)
return true
}
}
-func rewriteValueMIPS_OpEq8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpEq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq8 x y)
// cond:
// result: (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpMIPSSGTUconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpEqB(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpEqB(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (EqB x y)
// cond:
- // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+ // result: (XORconst [1] (XOR <fe.TypeBool()> x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, config.fe.TypeBool())
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, fe.TypeBool())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpEqPtr(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpEqPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (EqPtr x y)
// cond:
// result: (SGTUconst [1] (XOR x y))
y := v.Args[1]
v.reset(OpMIPSSGTUconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, fe.TypeUInt32())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpGeq16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq16 x y)
// cond:
// result: (XORconst [1] (SGT (SignExt16to32 y) (SignExt16to32 x)))
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpGeq16U(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGeq16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq16U x y)
// cond:
// result: (XORconst [1] (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x)))
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpGeq32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGeq32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq32 x y)
// cond:
// result: (XORconst [1] (SGT y x))
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, config.fe.TypeBool())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, fe.TypeBool())
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpGeq32F(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32F x y)
return true
}
}
-func rewriteValueMIPS_OpGeq32U(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGeq32U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq32U x y)
// cond:
// result: (XORconst [1] (SGTU y x))
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, config.fe.TypeBool())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, fe.TypeBool())
v0.AddArg(y)
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpGeq64F(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64F x y)
return true
}
}
-func rewriteValueMIPS_OpGeq8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq8 x y)
// cond:
// result: (XORconst [1] (SGT (SignExt8to32 y) (SignExt8to32 x)))
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpGeq8U(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGeq8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq8U x y)
// cond:
// result: (XORconst [1] (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x)))
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(x)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpGetClosurePtr(v *Value) bool {
// match: (GetClosurePtr)
// cond:
// result: (LoweredGetClosurePtr)
return true
}
}
-func rewriteValueMIPS_OpGreater16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGreater16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater16 x y)
// cond:
// result: (SGT (SignExt16to32 x) (SignExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGT)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS_OpGreater16U(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGreater16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater16U x y)
// cond:
// result: (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS_OpGreater32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpGreater32(v *Value) bool {
// match: (Greater32 x y)
// cond:
// result: (SGT x y)
return true
}
}
-func rewriteValueMIPS_OpGreater32F(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGreater32F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32F x y)
return true
}
}
-func rewriteValueMIPS_OpGreater32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpGreater32U(v *Value) bool {
// match: (Greater32U x y)
// cond:
// result: (SGTU x y)
return true
}
}
-func rewriteValueMIPS_OpGreater64F(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGreater64F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64F x y)
return true
}
}
-func rewriteValueMIPS_OpGreater8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGreater8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater8 x y)
// cond:
// result: (SGT (SignExt8to32 x) (SignExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGT)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS_OpGreater8U(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpGreater8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater8U x y)
// cond:
// result: (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS_OpHmul32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpHmul32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Hmul32 x y)
// cond:
// result: (Select0 (MULT x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSMULT, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
+ v0 := b.NewValue0(v.Pos, OpMIPSMULT, MakeTuple(fe.TypeInt32(), fe.TypeInt32()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpHmul32u(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpHmul32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Hmul32u x y)
// cond:
// result: (Select0 (MULTU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSMULTU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0 := b.NewValue0(v.Pos, OpMIPSMULTU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpInterCall(v *Value) bool {
// match: (InterCall [argwid] entry mem)
// cond:
// result: (CALLinter [argwid] entry mem)
return true
}
}
-func rewriteValueMIPS_OpIsInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpIsInBounds(v *Value) bool {
// match: (IsInBounds idx len)
// cond:
// result: (SGTU len idx)
return true
}
}
-func rewriteValueMIPS_OpIsNonNil(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpIsNonNil(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (IsNonNil ptr)
// cond:
// result: (SGTU ptr (MOVWconst [0]))
ptr := v.Args[0]
v.reset(OpMIPSSGTU)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpIsSliceInBounds(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpIsSliceInBounds(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (IsSliceInBounds idx len)
// cond:
// result: (XORconst [1] (SGTU idx len))
len := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, config.fe.TypeBool())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, fe.TypeBool())
v0.AddArg(idx)
v0.AddArg(len)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpLeq16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq16 x y)
// cond:
// result: (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpLeq16U(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLeq16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq16U x y)
// cond:
// result: (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpLeq32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLeq32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq32 x y)
// cond:
// result: (XORconst [1] (SGT x y))
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, config.fe.TypeBool())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, fe.TypeBool())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpLeq32F(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32F x y)
return true
}
}
-func rewriteValueMIPS_OpLeq32U(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLeq32U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq32U x y)
// cond:
// result: (XORconst [1] (SGTU x y))
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, config.fe.TypeBool())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, fe.TypeBool())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpLeq64F(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64F x y)
return true
}
}
-func rewriteValueMIPS_OpLeq8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq8 x y)
// cond:
// result: (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpLeq8U(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLeq8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq8U x y)
// cond:
// result: (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpMIPSXORconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpLess16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLess16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less16 x y)
// cond:
// result: (SGT (SignExt16to32 y) (SignExt16to32 x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGT)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS_OpLess16U(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLess16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less16U x y)
// cond:
// result: (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS_OpLess32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpLess32(v *Value) bool {
// match: (Less32 x y)
// cond:
// result: (SGT y x)
return true
}
}
-func rewriteValueMIPS_OpLess32F(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLess32F(v *Value) bool {
b := v.Block
_ = b
// match: (Less32F x y)
return true
}
}
-func rewriteValueMIPS_OpLess32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpLess32U(v *Value) bool {
// match: (Less32U x y)
// cond:
// result: (SGTU y x)
return true
}
}
-func rewriteValueMIPS_OpLess64F(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLess64F(v *Value) bool {
b := v.Block
_ = b
// match: (Less64F x y)
return true
}
}
-func rewriteValueMIPS_OpLess8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLess8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less8 x y)
// cond:
// result: (SGT (SignExt8to32 y) (SignExt8to32 x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGT)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS_OpLess8U(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLess8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less8U x y)
// cond:
// result: (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpLoad(v *Value) bool {
// match: (Load <t> ptr mem)
// cond: t.IsBoolean()
// result: (MOVBUload ptr mem)
}
return false
}
-func rewriteValueMIPS_OpLsh16x16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLsh16x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x16 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLsh16x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x32 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v2.AuxInt = 32
v2.AddArg(y)
v.AddArg(v2)
return true
}
}
-func rewriteValueMIPS_OpLsh16x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpLsh16x64(v *Value) bool {
// match: (Lsh16x64 x (Const64 [c]))
// cond: uint32(c) < 16
// result: (SLLconst x [c])
}
return false
}
-func rewriteValueMIPS_OpLsh16x8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLsh16x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x8 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpLsh32x16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLsh32x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x16 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpLsh32x32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLsh32x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x32 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v2.AuxInt = 32
v2.AddArg(y)
v.AddArg(v2)
return true
}
}
-func rewriteValueMIPS_OpLsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpLsh32x64(v *Value) bool {
// match: (Lsh32x64 x (Const64 [c]))
// cond: uint32(c) < 32
// result: (SLLconst x [c])
}
return false
}
-func rewriteValueMIPS_OpLsh32x8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLsh32x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x8 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLsh8x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x16 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpLsh8x32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLsh8x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x32 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v2.AuxInt = 32
v2.AddArg(y)
v.AddArg(v2)
return true
}
}
-func rewriteValueMIPS_OpLsh8x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpLsh8x64(v *Value) bool {
// match: (Lsh8x64 x (Const64 [c]))
// cond: uint32(c) < 8
// result: (SLLconst x [c])
}
return false
}
-func rewriteValueMIPS_OpLsh8x8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpLsh8x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x8 <t> x y)
// cond:
// result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpMIPSADD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSADD(v *Value) bool {
// match: (ADD (MOVWconst [c]) x)
// cond:
// result: (ADDconst [c] x)
}
return false
}
-func rewriteValueMIPS_OpMIPSADDconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSADDconst(v *Value) bool {
// match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
// cond:
// result: (MOVWaddr [off1+off2] {sym} ptr)
}
return false
}
-func rewriteValueMIPS_OpMIPSAND(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMIPSAND(v *Value) bool {
b := v.Block
_ = b
// match: (AND (MOVWconst [c]) x)
}
return false
}
-func rewriteValueMIPS_OpMIPSANDconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSANDconst(v *Value) bool {
// match: (ANDconst [0] _)
// cond:
// result: (MOVWconst [0])
}
return false
}
-func rewriteValueMIPS_OpMIPSCMOVZ(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMIPSCMOVZ(v *Value) bool {
b := v.Block
_ = b
// match: (CMOVZ _ b (MOVWconst [0]))
}
return false
}
-func rewriteValueMIPS_OpMIPSCMOVZzero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSCMOVZzero(v *Value) bool {
// match: (CMOVZzero _ (MOVWconst [0]))
// cond:
// result: (MOVWconst [0])
}
return false
}
-func rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v *Value) bool {
// match: (LoweredAtomicAdd ptr (MOVWconst [c]) mem)
// cond: is16Bit(c)
// result: (LoweredAtomicAddconst [c] ptr mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSLoweredAtomicStore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSLoweredAtomicStore(v *Value) bool {
// match: (LoweredAtomicStore ptr (MOVWconst [0]) mem)
// cond:
// result: (LoweredAtomicStorezero ptr mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVBUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool {
// match: (MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVBUload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value) bool {
b := v.Block
_ = b
// match: (MOVBUreg x:(MOVBUload _ _))
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVBload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool {
// match: (MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVBload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVBreg(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMIPSMOVBreg(v *Value) bool {
b := v.Block
_ = b
// match: (MOVBreg x:(MOVBload _ _))
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVBstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool {
// match: (MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVBstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool {
// match: (MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVBstorezero [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool {
// match: (MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVDload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool {
// match: (MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVDstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVFload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool {
// match: (MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVFload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVFstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVFstore(v *Value) bool {
// match: (MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVFstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVHUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool {
// match: (MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVHUload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool {
b := v.Block
_ = b
// match: (MOVHUreg x:(MOVBUload _ _))
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVHload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool {
// match: (MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVHload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVHreg(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool {
b := v.Block
_ = b
// match: (MOVHreg x:(MOVBload _ _))
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVHstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool {
// match: (MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVHstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool {
// match: (MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVHstorezero [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVWload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool {
// match: (MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVWload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVWreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVWreg(v *Value) bool {
// match: (MOVWreg x)
// cond: x.Uses == 1
// result: (MOVWnop x)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVWstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool {
// match: (MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVWstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool {
// match: (MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
// cond: (is16Bit(off1+off2) || x.Uses == 1)
// result: (MOVWstorezero [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS_OpMIPSMUL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
// match: (MUL (MOVWconst [0]) _ )
// cond:
// result: (MOVWconst [0])
}
return false
}
-func rewriteValueMIPS_OpMIPSNEG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSNEG(v *Value) bool {
// match: (NEG (MOVWconst [c]))
// cond:
// result: (MOVWconst [int64(int32(-c))])
}
return false
}
-func rewriteValueMIPS_OpMIPSNOR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSNOR(v *Value) bool {
// match: (NOR (MOVWconst [c]) x)
// cond:
// result: (NORconst [c] x)
}
return false
}
-func rewriteValueMIPS_OpMIPSNORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSNORconst(v *Value) bool {
// match: (NORconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [^(c|d)])
}
return false
}
-func rewriteValueMIPS_OpMIPSOR(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMIPSOR(v *Value) bool {
b := v.Block
_ = b
// match: (OR (MOVWconst [c]) x)
}
return false
}
-func rewriteValueMIPS_OpMIPSORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSORconst(v *Value) bool {
// match: (ORconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueMIPS_OpMIPSSGT(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSGT(v *Value) bool {
// match: (SGT (MOVWconst [c]) x)
// cond:
// result: (SGTconst [c] x)
}
return false
}
-func rewriteValueMIPS_OpMIPSSGTU(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSGTU(v *Value) bool {
// match: (SGTU (MOVWconst [c]) x)
// cond:
// result: (SGTUconst [c] x)
}
return false
}
-func rewriteValueMIPS_OpMIPSSGTUconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSGTUconst(v *Value) bool {
// match: (SGTUconst [c] (MOVWconst [d]))
// cond: uint32(c)>uint32(d)
// result: (MOVWconst [1])
}
return false
}
-func rewriteValueMIPS_OpMIPSSGTUzero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSGTUzero(v *Value) bool {
// match: (SGTUzero (MOVWconst [d]))
// cond: uint32(d) != 0
// result: (MOVWconst [1])
}
return false
}
-func rewriteValueMIPS_OpMIPSSGTconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSGTconst(v *Value) bool {
// match: (SGTconst [c] (MOVWconst [d]))
// cond: int32(c) > int32(d)
// result: (MOVWconst [1])
}
return false
}
-func rewriteValueMIPS_OpMIPSSGTzero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSGTzero(v *Value) bool {
// match: (SGTzero (MOVWconst [d]))
// cond: int32(d) > 0
// result: (MOVWconst [1])
}
return false
}
-func rewriteValueMIPS_OpMIPSSLL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSLL(v *Value) bool {
// match: (SLL _ (MOVWconst [c]))
// cond: uint32(c)>=32
// result: (MOVWconst [0])
}
return false
}
-func rewriteValueMIPS_OpMIPSSLLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSLLconst(v *Value) bool {
// match: (SLLconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [int64(int32(uint32(d)<<uint32(c)))])
}
return false
}
-func rewriteValueMIPS_OpMIPSSRA(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSRA(v *Value) bool {
// match: (SRA x (MOVWconst [c]))
// cond: uint32(c)>=32
// result: (SRAconst x [31])
}
return false
}
-func rewriteValueMIPS_OpMIPSSRAconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSRAconst(v *Value) bool {
// match: (SRAconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [int64(int32(d)>>uint32(c))])
}
return false
}
-func rewriteValueMIPS_OpMIPSSRL(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSRL(v *Value) bool {
// match: (SRL _ (MOVWconst [c]))
// cond: uint32(c)>=32
// result: (MOVWconst [0])
}
return false
}
-func rewriteValueMIPS_OpMIPSSRLconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSRLconst(v *Value) bool {
// match: (SRLconst [c] (MOVWconst [d]))
// cond:
// result: (MOVWconst [int64(uint32(d)>>uint32(c))])
}
return false
}
-func rewriteValueMIPS_OpMIPSSUB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSUB(v *Value) bool {
// match: (SUB x (MOVWconst [c]))
// cond:
// result: (SUBconst [c] x)
}
return false
}
-func rewriteValueMIPS_OpMIPSSUBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSSUBconst(v *Value) bool {
// match: (SUBconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueMIPS_OpMIPSXOR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSXOR(v *Value) bool {
// match: (XOR (MOVWconst [c]) x)
// cond:
// result: (XORconst [c] x)
}
return false
}
-func rewriteValueMIPS_OpMIPSXORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMIPSXORconst(v *Value) bool {
// match: (XORconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueMIPS_OpMod16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMod16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16 x y)
// cond:
// result: (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(fe.TypeInt32(), fe.TypeInt32()))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpMod16u(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMod16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16u x y)
// cond:
// result: (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpMod32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMod32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod32 x y)
// cond:
// result: (Select0 (DIV x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(fe.TypeInt32(), fe.TypeInt32()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpMod32u(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMod32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod32u x y)
// cond:
// result: (Select0 (DIVU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpMod8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMod8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8 x y)
// cond:
// result: (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, MakeTuple(fe.TypeInt32(), fe.TypeInt32()))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpMod8u(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMod8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8u x y)
// cond:
// result: (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS_OpMove(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpMove(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Move [0] _ _ mem)
// cond:
// result: mem
mem := v.Args[2]
v.reset(OpMIPSMOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, fe.TypeUInt8())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
}
v.reset(OpMIPSMOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, fe.TypeUInt16())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpMIPSMOVBstore)
v.AuxInt = 1
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, fe.TypeUInt8())
v0.AuxInt = 1
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, config.fe.TypeUInt8())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, fe.TypeUInt8())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
}
v.reset(OpMIPSMOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, fe.TypeUInt32())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpMIPSMOVHstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, fe.TypeUInt16())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, config.fe.TypeUInt16())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, fe.TypeUInt16())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpMIPSMOVBstore)
v.AuxInt = 3
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, fe.TypeUInt8())
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, config.fe.TypeUInt8())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, fe.TypeUInt8())
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
v3.AuxInt = 1
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, config.fe.TypeUInt8())
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, fe.TypeUInt8())
v4.AuxInt = 1
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, config.fe.TypeUInt8())
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, fe.TypeUInt8())
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
v.reset(OpMIPSMOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, fe.TypeUInt8())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
v1.AuxInt = 1
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, config.fe.TypeUInt8())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, fe.TypeUInt8())
v2.AuxInt = 1
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, config.fe.TypeUInt8())
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, fe.TypeUInt8())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v.reset(OpMIPSMOVWstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, fe.TypeUInt32())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpMIPSMOVHstore)
v.AuxInt = 6
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, config.fe.TypeInt16())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, fe.TypeInt16())
v0.AuxInt = 6
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, config.fe.TypeInt16())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, fe.TypeInt16())
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
v3.AuxInt = 2
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, config.fe.TypeInt16())
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, fe.TypeInt16())
v4.AuxInt = 2
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, config.fe.TypeInt16())
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, fe.TypeInt16())
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
v.reset(OpMIPSMOVHstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, config.fe.TypeInt16())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, fe.TypeInt16())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, config.fe.TypeInt16())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, fe.TypeInt16())
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, config.fe.TypeInt16())
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, fe.TypeInt16())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v.reset(OpMIPSMOVWstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, fe.TypeUInt32())
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, fe.TypeUInt32())
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, fe.TypeUInt32())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v.reset(OpMIPSMOVWstore)
v.AuxInt = 12
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, fe.TypeUInt32())
v0.AuxInt = 12
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
v1.AuxInt = 8
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, fe.TypeUInt32())
v2.AuxInt = 8
v2.AddArg(src)
v2.AddArg(mem)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
v3.AuxInt = 4
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, fe.TypeUInt32())
v4.AuxInt = 4
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, fe.TypeUInt32())
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
}
return false
}
-func rewriteValueMIPS_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMul16(v *Value) bool {
// match: (Mul16 x y)
// cond:
// result: (MUL x y)
return true
}
}
-func rewriteValueMIPS_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMul32(v *Value) bool {
// match: (Mul32 x y)
// cond:
// result: (MUL x y)
return true
}
}
-func rewriteValueMIPS_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMul32F(v *Value) bool {
// match: (Mul32F x y)
// cond:
// result: (MULF x y)
return true
}
}
-func rewriteValueMIPS_OpMul32uhilo(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMul32uhilo(v *Value) bool {
// match: (Mul32uhilo x y)
// cond:
// result: (MULTU x y)
return true
}
}
-func rewriteValueMIPS_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMul64F(v *Value) bool {
// match: (Mul64F x y)
// cond:
// result: (MULD x y)
return true
}
}
-func rewriteValueMIPS_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpMul8(v *Value) bool {
// match: (Mul8 x y)
// cond:
// result: (MUL x y)
return true
}
}
-func rewriteValueMIPS_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpNeg16(v *Value) bool {
// match: (Neg16 x)
// cond:
// result: (NEG x)
return true
}
}
-func rewriteValueMIPS_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpNeg32(v *Value) bool {
// match: (Neg32 x)
// cond:
// result: (NEG x)
return true
}
}
-func rewriteValueMIPS_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpNeg32F(v *Value) bool {
// match: (Neg32F x)
// cond:
// result: (NEGF x)
return true
}
}
-func rewriteValueMIPS_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpNeg64F(v *Value) bool {
// match: (Neg64F x)
// cond:
// result: (NEGD x)
return true
}
}
-func rewriteValueMIPS_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpNeg8(v *Value) bool {
// match: (Neg8 x)
// cond:
// result: (NEG x)
return true
}
}
-func rewriteValueMIPS_OpNeq16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpNeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq16 x y)
// cond:
// result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v3.AuxInt = 0
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpNeq32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpNeq32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq32 x y)
// cond:
// result: (SGTU (XOR x y) (MOVWconst [0]))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, fe.TypeUInt32())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = 0
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS_OpNeq32F(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpNeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32F x y)
return true
}
}
-func rewriteValueMIPS_OpNeq64F(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpNeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq64F x y)
return true
}
}
-func rewriteValueMIPS_OpNeq8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpNeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq8 x y)
// cond:
// result: (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v3.AuxInt = 0
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpNeqB(v *Value) bool {
// match: (NeqB x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueMIPS_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpNeqPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (NeqPtr x y)
// cond:
// result: (SGTU (XOR x y) (MOVWconst [0]))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSGTU)
- v0 := b.NewValue0(v.Pos, OpMIPSXOR, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, fe.TypeUInt32())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = 0
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpNilCheck(v *Value) bool {
// match: (NilCheck ptr mem)
// cond:
// result: (LoweredNilCheck ptr mem)
return true
}
}
-func rewriteValueMIPS_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpNot(v *Value) bool {
// match: (Not x)
// cond:
// result: (XORconst [1] x)
return true
}
}
-func rewriteValueMIPS_OpOffPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpOffPtr(v *Value) bool {
// match: (OffPtr [off] ptr:(SP))
// cond:
// result: (MOVWaddr [off] ptr)
return true
}
}
-func rewriteValueMIPS_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpOr16(v *Value) bool {
// match: (Or16 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueMIPS_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpOr32(v *Value) bool {
// match: (Or32 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueMIPS_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpOr8(v *Value) bool {
// match: (Or8 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueMIPS_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpOrB(v *Value) bool {
// match: (OrB x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueMIPS_OpRound32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpRound32F(v *Value) bool {
// match: (Round32F x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS_OpRound64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpRound64F(v *Value) bool {
// match: (Round64F x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh16Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux16 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh16Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux32 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
v3.AddArg(y)
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh16Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux64 x (Const64 [c]))
// cond: uint32(c) < 16
- // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ // result: (SRLconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpMIPSSRLconst)
v.AuxInt = c + 16
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, fe.TypeUInt32())
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
}
return false
}
-func rewriteValueMIPS_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh16Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux8 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh16x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x16 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v3.AuxInt = -1
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueMIPS_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh16x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x32 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, fe.TypeUInt32())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = -1
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
v3.AddArg(y)
v1.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh16x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint32(c) < 16
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+ // result: (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpMIPSSRAconst)
v.AuxInt = c + 16
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, fe.TypeUInt32())
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
}
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint32(c) >= 16
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
+ // result: (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [31])
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpMIPSSRAconst)
v.AuxInt = 31
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, fe.TypeUInt32())
v0.AuxInt = 16
v0.AddArg(x)
v.AddArg(v0)
}
return false
}
-func rewriteValueMIPS_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh16x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x8 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v3.AuxInt = -1
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueMIPS_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh32Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux16 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh32Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux32 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v2.AuxInt = 32
v2.AddArg(y)
v.AddArg(v2)
return true
}
}
-func rewriteValueMIPS_OpRsh32Ux64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpRsh32Ux64(v *Value) bool {
// match: (Rsh32Ux64 x (Const64 [c]))
// cond: uint32(c) < 32
// result: (SRLconst x [c])
}
return false
}
-func rewriteValueMIPS_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh32Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux8 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v4.AddArg(y)
v3.AddArg(v4)
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh32x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x16 x y)
// cond:
- // result: (SRA x ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+ // result: (SRA x ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = -1
v0.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh32x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x32 x y)
// cond:
- // result: (SRA x ( CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
+ // result: (SRA x ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, fe.TypeUInt32())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = -1
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v2.AuxInt = 32
v2.AddArg(y)
v0.AddArg(v2)
return true
}
}
-func rewriteValueMIPS_OpRsh32x64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpRsh32x64(v *Value) bool {
// match: (Rsh32x64 x (Const64 [c]))
// cond: uint32(c) < 32
// result: (SRAconst x [c])
}
return false
}
-func rewriteValueMIPS_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh32x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x8 x y)
// cond:
- // result: (SRA x ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+ // result: (SRA x ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = -1
v0.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
- v4 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v4.AddArg(y)
v3.AddArg(v4)
v0.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh8Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux16 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh8Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux32 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
v3.AddArg(y)
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh8Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux64 x (Const64 [c]))
// cond: uint32(c) < 8
- // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ // result: (SRLconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpMIPSSRLconst)
v.AuxInt = c + 24
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, fe.TypeUInt32())
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
}
return false
}
-func rewriteValueMIPS_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh8Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux8 <t> x y)
// cond:
// result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpMIPSCMOVZ)
v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v3.AuxInt = 0
v.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh8x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x16 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v3.AuxInt = -1
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueMIPS_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh8x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x32 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, fe.TypeUInt32())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = -1
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v3.AuxInt = 32
v3.AddArg(y)
v1.AddArg(v3)
return true
}
}
-func rewriteValueMIPS_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh8x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint32(c) < 8
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+ // result: (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpMIPSSRAconst)
v.AuxInt = c + 24
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, fe.TypeUInt32())
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
}
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint32(c) >= 8
- // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
+ // result: (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [31])
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpMIPSSRAconst)
v.AuxInt = 31
- v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, fe.TypeUInt32())
v0.AuxInt = 24
v0.AddArg(x)
v.AddArg(v0)
}
return false
}
-func rewriteValueMIPS_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpRsh8x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x8 x y)
// cond:
- // result: (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPSSRA)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v3.AuxInt = -1
v1.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, config.fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, fe.TypeBool())
v4.AuxInt = 32
- v5 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v5.AddArg(y)
v4.AddArg(v5)
v1.AddArg(v4)
return true
}
}
-func rewriteValueMIPS_OpSelect0(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpSelect0(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Select0 (Add32carry <t> x y))
// cond:
// result: (ADD <t.FieldType(0)> x y)
break
}
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPSMULTU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMULTU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = c
v0.AddArg(v1)
v0.AddArg(x)
v0.AuxInt = -1
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(x)
}
return false
}
-func rewriteValueMIPS_OpSelect1(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpSelect1(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Select1 (Add32carry <t> x y))
// cond:
- // result: (SGTU <config.fe.TypeBool()> x (ADD <t.FieldType(0)> x y))
+ // result: (SGTU <fe.TypeBool()> x (ADD <t.FieldType(0)> x y))
for {
v_0 := v.Args[0]
if v_0.Op != OpAdd32carry {
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(OpMIPSSGTU)
- v.Type = config.fe.TypeBool()
+ v.Type = fe.TypeBool()
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpMIPSADD, t.FieldType(0))
v0.AddArg(x)
}
// match: (Select1 (Sub32carry <t> x y))
// cond:
- // result: (SGTU <config.fe.TypeBool()> (SUB <t.FieldType(0)> x y) x)
+ // result: (SGTU <fe.TypeBool()> (SUB <t.FieldType(0)> x y) x)
for {
v_0 := v.Args[0]
if v_0.Op != OpSub32carry {
x := v_0.Args[0]
y := v_0.Args[1]
v.reset(OpMIPSSGTU)
- v.Type = config.fe.TypeBool()
+ v.Type = fe.TypeBool()
v0 := b.NewValue0(v.Pos, OpMIPSSUB, t.FieldType(0))
v0.AddArg(x)
v0.AddArg(y)
break
}
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPSMULTU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMULTU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = c
v0.AddArg(v1)
v0.AddArg(x)
}
return false
}
-func rewriteValueMIPS_OpSignExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpSignExt16to32(v *Value) bool {
// match: (SignExt16to32 x)
// cond:
// result: (MOVHreg x)
return true
}
}
-func rewriteValueMIPS_OpSignExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpSignExt8to16(v *Value) bool {
// match: (SignExt8to16 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueMIPS_OpSignExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpSignExt8to32(v *Value) bool {
// match: (SignExt8to32 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueMIPS_OpSignmask(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpSignmask(v *Value) bool {
// match: (Signmask x)
// cond:
// result: (SRAconst x [31])
return true
}
}
-func rewriteValueMIPS_OpSlicemask(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpSlicemask(v *Value) bool {
b := v.Block
_ = b
// match: (Slicemask <t> x)
return true
}
}
-func rewriteValueMIPS_OpSqrt(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpSqrt(v *Value) bool {
// match: (Sqrt x)
// cond:
// result: (SQRTD x)
return true
}
}
-func rewriteValueMIPS_OpStaticCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpStaticCall(v *Value) bool {
// match: (StaticCall [argwid] {target} mem)
// cond:
// result: (CALLstatic [argwid] {target} mem)
return true
}
}
-func rewriteValueMIPS_OpStore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpStore(v *Value) bool {
// match: (Store {t} ptr val mem)
// cond: t.(Type).Size() == 1
// result: (MOVBstore ptr val mem)
}
return false
}
-func rewriteValueMIPS_OpSub16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpSub16(v *Value) bool {
// match: (Sub16 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueMIPS_OpSub32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpSub32(v *Value) bool {
// match: (Sub32 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueMIPS_OpSub32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpSub32F(v *Value) bool {
// match: (Sub32F x y)
// cond:
// result: (SUBF x y)
return true
}
}
-func rewriteValueMIPS_OpSub32withcarry(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpSub32withcarry(v *Value) bool {
b := v.Block
_ = b
// match: (Sub32withcarry <t> x y c)
return true
}
}
-func rewriteValueMIPS_OpSub64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpSub64F(v *Value) bool {
// match: (Sub64F x y)
// cond:
// result: (SUBD x y)
return true
}
}
-func rewriteValueMIPS_OpSub8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpSub8(v *Value) bool {
// match: (Sub8 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueMIPS_OpSubPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpSubPtr(v *Value) bool {
// match: (SubPtr x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueMIPS_OpTrunc16to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpTrunc16to8(v *Value) bool {
// match: (Trunc16to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS_OpTrunc32to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpTrunc32to16(v *Value) bool {
// match: (Trunc32to16 x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS_OpTrunc32to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpTrunc32to8(v *Value) bool {
// match: (Trunc32to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS_OpXor16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpXor16(v *Value) bool {
// match: (Xor16 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueMIPS_OpXor32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpXor32(v *Value) bool {
// match: (Xor32 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueMIPS_OpXor8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpXor8(v *Value) bool {
// match: (Xor8 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueMIPS_OpZero(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpZero(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Zero [0] _ mem)
// cond:
// result: mem
mem := v.Args[1]
v.reset(OpMIPSMOVBstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
}
v.reset(OpMIPSMOVHstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
v.reset(OpMIPSMOVBstore)
v.AuxInt = 1
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
}
v.reset(OpMIPSMOVWstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
v.reset(OpMIPSMOVHstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
v.reset(OpMIPSMOVBstore)
v.AuxInt = 3
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
v1.AuxInt = 2
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
v3.AuxInt = 1
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v4.AuxInt = 0
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
v5.AuxInt = 0
v5.AddArg(ptr)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v6.AuxInt = 0
v5.AddArg(v6)
v5.AddArg(mem)
v.reset(OpMIPSMOVBstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
v1.AuxInt = 1
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
v.reset(OpMIPSMOVHstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
v1.AuxInt = 2
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
v.reset(OpMIPSMOVWstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
v.reset(OpMIPSMOVWstore)
v.AuxInt = 8
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
v1.AuxInt = 4
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
v.reset(OpMIPSMOVWstore)
v.AuxInt = 12
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
v1.AuxInt = 8
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
v3.AuxInt = 4
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v4.AuxInt = 0
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, TypeMem)
v5.AuxInt = 0
v5.AddArg(ptr)
- v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v6.AuxInt = 0
v5.AddArg(v6)
v5.AddArg(mem)
}
return false
}
-func rewriteValueMIPS_OpZeroExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpZeroExt16to32(v *Value) bool {
// match: (ZeroExt16to32 x)
// cond:
// result: (MOVHUreg x)
return true
}
}
-func rewriteValueMIPS_OpZeroExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpZeroExt8to16(v *Value) bool {
// match: (ZeroExt8to16 x)
// cond:
// result: (MOVBUreg x)
return true
}
}
-func rewriteValueMIPS_OpZeroExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS_OpZeroExt8to32(v *Value) bool {
// match: (ZeroExt8to32 x)
// cond:
// result: (MOVBUreg x)
return true
}
}
-func rewriteValueMIPS_OpZeromask(v *Value, config *Config) bool {
+func rewriteValueMIPS_OpZeromask(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Zeromask x)
// cond:
// result: (NEG (SGTU x (MOVWconst [0])))
for {
x := v.Args[0]
v.reset(OpMIPSNEG)
- v0 := b.NewValue0(v.Pos, OpMIPSSGTU, config.fe.TypeBool())
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, fe.TypeBool())
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, fe.TypeUInt32())
v1.AuxInt = 0
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
-func rewriteBlockMIPS(b *Block, config *Config) bool {
+func rewriteBlockMIPS(b *Block) bool {
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
switch b.Kind {
case BlockMIPSEQ:
// match: (EQ (FPFlagTrue cmp) yes no)
import "math"
var _ = math.MinInt8 // in case not otherwise used
-func rewriteValueMIPS64(v *Value, config *Config) bool {
+func rewriteValueMIPS64(v *Value) bool {
switch v.Op {
case OpAdd16:
- return rewriteValueMIPS64_OpAdd16(v, config)
+ return rewriteValueMIPS64_OpAdd16(v)
case OpAdd32:
- return rewriteValueMIPS64_OpAdd32(v, config)
+ return rewriteValueMIPS64_OpAdd32(v)
case OpAdd32F:
- return rewriteValueMIPS64_OpAdd32F(v, config)
+ return rewriteValueMIPS64_OpAdd32F(v)
case OpAdd64:
- return rewriteValueMIPS64_OpAdd64(v, config)
+ return rewriteValueMIPS64_OpAdd64(v)
case OpAdd64F:
- return rewriteValueMIPS64_OpAdd64F(v, config)
+ return rewriteValueMIPS64_OpAdd64F(v)
case OpAdd8:
- return rewriteValueMIPS64_OpAdd8(v, config)
+ return rewriteValueMIPS64_OpAdd8(v)
case OpAddPtr:
- return rewriteValueMIPS64_OpAddPtr(v, config)
+ return rewriteValueMIPS64_OpAddPtr(v)
case OpAddr:
- return rewriteValueMIPS64_OpAddr(v, config)
+ return rewriteValueMIPS64_OpAddr(v)
case OpAnd16:
- return rewriteValueMIPS64_OpAnd16(v, config)
+ return rewriteValueMIPS64_OpAnd16(v)
case OpAnd32:
- return rewriteValueMIPS64_OpAnd32(v, config)
+ return rewriteValueMIPS64_OpAnd32(v)
case OpAnd64:
- return rewriteValueMIPS64_OpAnd64(v, config)
+ return rewriteValueMIPS64_OpAnd64(v)
case OpAnd8:
- return rewriteValueMIPS64_OpAnd8(v, config)
+ return rewriteValueMIPS64_OpAnd8(v)
case OpAndB:
- return rewriteValueMIPS64_OpAndB(v, config)
+ return rewriteValueMIPS64_OpAndB(v)
case OpAvg64u:
- return rewriteValueMIPS64_OpAvg64u(v, config)
+ return rewriteValueMIPS64_OpAvg64u(v)
case OpClosureCall:
- return rewriteValueMIPS64_OpClosureCall(v, config)
+ return rewriteValueMIPS64_OpClosureCall(v)
case OpCom16:
- return rewriteValueMIPS64_OpCom16(v, config)
+ return rewriteValueMIPS64_OpCom16(v)
case OpCom32:
- return rewriteValueMIPS64_OpCom32(v, config)
+ return rewriteValueMIPS64_OpCom32(v)
case OpCom64:
- return rewriteValueMIPS64_OpCom64(v, config)
+ return rewriteValueMIPS64_OpCom64(v)
case OpCom8:
- return rewriteValueMIPS64_OpCom8(v, config)
+ return rewriteValueMIPS64_OpCom8(v)
case OpConst16:
- return rewriteValueMIPS64_OpConst16(v, config)
+ return rewriteValueMIPS64_OpConst16(v)
case OpConst32:
- return rewriteValueMIPS64_OpConst32(v, config)
+ return rewriteValueMIPS64_OpConst32(v)
case OpConst32F:
- return rewriteValueMIPS64_OpConst32F(v, config)
+ return rewriteValueMIPS64_OpConst32F(v)
case OpConst64:
- return rewriteValueMIPS64_OpConst64(v, config)
+ return rewriteValueMIPS64_OpConst64(v)
case OpConst64F:
- return rewriteValueMIPS64_OpConst64F(v, config)
+ return rewriteValueMIPS64_OpConst64F(v)
case OpConst8:
- return rewriteValueMIPS64_OpConst8(v, config)
+ return rewriteValueMIPS64_OpConst8(v)
case OpConstBool:
- return rewriteValueMIPS64_OpConstBool(v, config)
+ return rewriteValueMIPS64_OpConstBool(v)
case OpConstNil:
- return rewriteValueMIPS64_OpConstNil(v, config)
+ return rewriteValueMIPS64_OpConstNil(v)
case OpConvert:
- return rewriteValueMIPS64_OpConvert(v, config)
+ return rewriteValueMIPS64_OpConvert(v)
case OpCvt32Fto32:
- return rewriteValueMIPS64_OpCvt32Fto32(v, config)
+ return rewriteValueMIPS64_OpCvt32Fto32(v)
case OpCvt32Fto64:
- return rewriteValueMIPS64_OpCvt32Fto64(v, config)
+ return rewriteValueMIPS64_OpCvt32Fto64(v)
case OpCvt32Fto64F:
- return rewriteValueMIPS64_OpCvt32Fto64F(v, config)
+ return rewriteValueMIPS64_OpCvt32Fto64F(v)
case OpCvt32to32F:
- return rewriteValueMIPS64_OpCvt32to32F(v, config)
+ return rewriteValueMIPS64_OpCvt32to32F(v)
case OpCvt32to64F:
- return rewriteValueMIPS64_OpCvt32to64F(v, config)
+ return rewriteValueMIPS64_OpCvt32to64F(v)
case OpCvt64Fto32:
- return rewriteValueMIPS64_OpCvt64Fto32(v, config)
+ return rewriteValueMIPS64_OpCvt64Fto32(v)
case OpCvt64Fto32F:
- return rewriteValueMIPS64_OpCvt64Fto32F(v, config)
+ return rewriteValueMIPS64_OpCvt64Fto32F(v)
case OpCvt64Fto64:
- return rewriteValueMIPS64_OpCvt64Fto64(v, config)
+ return rewriteValueMIPS64_OpCvt64Fto64(v)
case OpCvt64to32F:
- return rewriteValueMIPS64_OpCvt64to32F(v, config)
+ return rewriteValueMIPS64_OpCvt64to32F(v)
case OpCvt64to64F:
- return rewriteValueMIPS64_OpCvt64to64F(v, config)
+ return rewriteValueMIPS64_OpCvt64to64F(v)
case OpDiv16:
- return rewriteValueMIPS64_OpDiv16(v, config)
+ return rewriteValueMIPS64_OpDiv16(v)
case OpDiv16u:
- return rewriteValueMIPS64_OpDiv16u(v, config)
+ return rewriteValueMIPS64_OpDiv16u(v)
case OpDiv32:
- return rewriteValueMIPS64_OpDiv32(v, config)
+ return rewriteValueMIPS64_OpDiv32(v)
case OpDiv32F:
- return rewriteValueMIPS64_OpDiv32F(v, config)
+ return rewriteValueMIPS64_OpDiv32F(v)
case OpDiv32u:
- return rewriteValueMIPS64_OpDiv32u(v, config)
+ return rewriteValueMIPS64_OpDiv32u(v)
case OpDiv64:
- return rewriteValueMIPS64_OpDiv64(v, config)
+ return rewriteValueMIPS64_OpDiv64(v)
case OpDiv64F:
- return rewriteValueMIPS64_OpDiv64F(v, config)
+ return rewriteValueMIPS64_OpDiv64F(v)
case OpDiv64u:
- return rewriteValueMIPS64_OpDiv64u(v, config)
+ return rewriteValueMIPS64_OpDiv64u(v)
case OpDiv8:
- return rewriteValueMIPS64_OpDiv8(v, config)
+ return rewriteValueMIPS64_OpDiv8(v)
case OpDiv8u:
- return rewriteValueMIPS64_OpDiv8u(v, config)
+ return rewriteValueMIPS64_OpDiv8u(v)
case OpEq16:
- return rewriteValueMIPS64_OpEq16(v, config)
+ return rewriteValueMIPS64_OpEq16(v)
case OpEq32:
- return rewriteValueMIPS64_OpEq32(v, config)
+ return rewriteValueMIPS64_OpEq32(v)
case OpEq32F:
- return rewriteValueMIPS64_OpEq32F(v, config)
+ return rewriteValueMIPS64_OpEq32F(v)
case OpEq64:
- return rewriteValueMIPS64_OpEq64(v, config)
+ return rewriteValueMIPS64_OpEq64(v)
case OpEq64F:
- return rewriteValueMIPS64_OpEq64F(v, config)
+ return rewriteValueMIPS64_OpEq64F(v)
case OpEq8:
- return rewriteValueMIPS64_OpEq8(v, config)
+ return rewriteValueMIPS64_OpEq8(v)
case OpEqB:
- return rewriteValueMIPS64_OpEqB(v, config)
+ return rewriteValueMIPS64_OpEqB(v)
case OpEqPtr:
- return rewriteValueMIPS64_OpEqPtr(v, config)
+ return rewriteValueMIPS64_OpEqPtr(v)
case OpGeq16:
- return rewriteValueMIPS64_OpGeq16(v, config)
+ return rewriteValueMIPS64_OpGeq16(v)
case OpGeq16U:
- return rewriteValueMIPS64_OpGeq16U(v, config)
+ return rewriteValueMIPS64_OpGeq16U(v)
case OpGeq32:
- return rewriteValueMIPS64_OpGeq32(v, config)
+ return rewriteValueMIPS64_OpGeq32(v)
case OpGeq32F:
- return rewriteValueMIPS64_OpGeq32F(v, config)
+ return rewriteValueMIPS64_OpGeq32F(v)
case OpGeq32U:
- return rewriteValueMIPS64_OpGeq32U(v, config)
+ return rewriteValueMIPS64_OpGeq32U(v)
case OpGeq64:
- return rewriteValueMIPS64_OpGeq64(v, config)
+ return rewriteValueMIPS64_OpGeq64(v)
case OpGeq64F:
- return rewriteValueMIPS64_OpGeq64F(v, config)
+ return rewriteValueMIPS64_OpGeq64F(v)
case OpGeq64U:
- return rewriteValueMIPS64_OpGeq64U(v, config)
+ return rewriteValueMIPS64_OpGeq64U(v)
case OpGeq8:
- return rewriteValueMIPS64_OpGeq8(v, config)
+ return rewriteValueMIPS64_OpGeq8(v)
case OpGeq8U:
- return rewriteValueMIPS64_OpGeq8U(v, config)
+ return rewriteValueMIPS64_OpGeq8U(v)
case OpGetClosurePtr:
- return rewriteValueMIPS64_OpGetClosurePtr(v, config)
+ return rewriteValueMIPS64_OpGetClosurePtr(v)
case OpGreater16:
- return rewriteValueMIPS64_OpGreater16(v, config)
+ return rewriteValueMIPS64_OpGreater16(v)
case OpGreater16U:
- return rewriteValueMIPS64_OpGreater16U(v, config)
+ return rewriteValueMIPS64_OpGreater16U(v)
case OpGreater32:
- return rewriteValueMIPS64_OpGreater32(v, config)
+ return rewriteValueMIPS64_OpGreater32(v)
case OpGreater32F:
- return rewriteValueMIPS64_OpGreater32F(v, config)
+ return rewriteValueMIPS64_OpGreater32F(v)
case OpGreater32U:
- return rewriteValueMIPS64_OpGreater32U(v, config)
+ return rewriteValueMIPS64_OpGreater32U(v)
case OpGreater64:
- return rewriteValueMIPS64_OpGreater64(v, config)
+ return rewriteValueMIPS64_OpGreater64(v)
case OpGreater64F:
- return rewriteValueMIPS64_OpGreater64F(v, config)
+ return rewriteValueMIPS64_OpGreater64F(v)
case OpGreater64U:
- return rewriteValueMIPS64_OpGreater64U(v, config)
+ return rewriteValueMIPS64_OpGreater64U(v)
case OpGreater8:
- return rewriteValueMIPS64_OpGreater8(v, config)
+ return rewriteValueMIPS64_OpGreater8(v)
case OpGreater8U:
- return rewriteValueMIPS64_OpGreater8U(v, config)
+ return rewriteValueMIPS64_OpGreater8U(v)
case OpHmul32:
- return rewriteValueMIPS64_OpHmul32(v, config)
+ return rewriteValueMIPS64_OpHmul32(v)
case OpHmul32u:
- return rewriteValueMIPS64_OpHmul32u(v, config)
+ return rewriteValueMIPS64_OpHmul32u(v)
case OpHmul64:
- return rewriteValueMIPS64_OpHmul64(v, config)
+ return rewriteValueMIPS64_OpHmul64(v)
case OpHmul64u:
- return rewriteValueMIPS64_OpHmul64u(v, config)
+ return rewriteValueMIPS64_OpHmul64u(v)
case OpInterCall:
- return rewriteValueMIPS64_OpInterCall(v, config)
+ return rewriteValueMIPS64_OpInterCall(v)
case OpIsInBounds:
- return rewriteValueMIPS64_OpIsInBounds(v, config)
+ return rewriteValueMIPS64_OpIsInBounds(v)
case OpIsNonNil:
- return rewriteValueMIPS64_OpIsNonNil(v, config)
+ return rewriteValueMIPS64_OpIsNonNil(v)
case OpIsSliceInBounds:
- return rewriteValueMIPS64_OpIsSliceInBounds(v, config)
+ return rewriteValueMIPS64_OpIsSliceInBounds(v)
case OpLeq16:
- return rewriteValueMIPS64_OpLeq16(v, config)
+ return rewriteValueMIPS64_OpLeq16(v)
case OpLeq16U:
- return rewriteValueMIPS64_OpLeq16U(v, config)
+ return rewriteValueMIPS64_OpLeq16U(v)
case OpLeq32:
- return rewriteValueMIPS64_OpLeq32(v, config)
+ return rewriteValueMIPS64_OpLeq32(v)
case OpLeq32F:
- return rewriteValueMIPS64_OpLeq32F(v, config)
+ return rewriteValueMIPS64_OpLeq32F(v)
case OpLeq32U:
- return rewriteValueMIPS64_OpLeq32U(v, config)
+ return rewriteValueMIPS64_OpLeq32U(v)
case OpLeq64:
- return rewriteValueMIPS64_OpLeq64(v, config)
+ return rewriteValueMIPS64_OpLeq64(v)
case OpLeq64F:
- return rewriteValueMIPS64_OpLeq64F(v, config)
+ return rewriteValueMIPS64_OpLeq64F(v)
case OpLeq64U:
- return rewriteValueMIPS64_OpLeq64U(v, config)
+ return rewriteValueMIPS64_OpLeq64U(v)
case OpLeq8:
- return rewriteValueMIPS64_OpLeq8(v, config)
+ return rewriteValueMIPS64_OpLeq8(v)
case OpLeq8U:
- return rewriteValueMIPS64_OpLeq8U(v, config)
+ return rewriteValueMIPS64_OpLeq8U(v)
case OpLess16:
- return rewriteValueMIPS64_OpLess16(v, config)
+ return rewriteValueMIPS64_OpLess16(v)
case OpLess16U:
- return rewriteValueMIPS64_OpLess16U(v, config)
+ return rewriteValueMIPS64_OpLess16U(v)
case OpLess32:
- return rewriteValueMIPS64_OpLess32(v, config)
+ return rewriteValueMIPS64_OpLess32(v)
case OpLess32F:
- return rewriteValueMIPS64_OpLess32F(v, config)
+ return rewriteValueMIPS64_OpLess32F(v)
case OpLess32U:
- return rewriteValueMIPS64_OpLess32U(v, config)
+ return rewriteValueMIPS64_OpLess32U(v)
case OpLess64:
- return rewriteValueMIPS64_OpLess64(v, config)
+ return rewriteValueMIPS64_OpLess64(v)
case OpLess64F:
- return rewriteValueMIPS64_OpLess64F(v, config)
+ return rewriteValueMIPS64_OpLess64F(v)
case OpLess64U:
- return rewriteValueMIPS64_OpLess64U(v, config)
+ return rewriteValueMIPS64_OpLess64U(v)
case OpLess8:
- return rewriteValueMIPS64_OpLess8(v, config)
+ return rewriteValueMIPS64_OpLess8(v)
case OpLess8U:
- return rewriteValueMIPS64_OpLess8U(v, config)
+ return rewriteValueMIPS64_OpLess8U(v)
case OpLoad:
- return rewriteValueMIPS64_OpLoad(v, config)
+ return rewriteValueMIPS64_OpLoad(v)
case OpLsh16x16:
- return rewriteValueMIPS64_OpLsh16x16(v, config)
+ return rewriteValueMIPS64_OpLsh16x16(v)
case OpLsh16x32:
- return rewriteValueMIPS64_OpLsh16x32(v, config)
+ return rewriteValueMIPS64_OpLsh16x32(v)
case OpLsh16x64:
- return rewriteValueMIPS64_OpLsh16x64(v, config)
+ return rewriteValueMIPS64_OpLsh16x64(v)
case OpLsh16x8:
- return rewriteValueMIPS64_OpLsh16x8(v, config)
+ return rewriteValueMIPS64_OpLsh16x8(v)
case OpLsh32x16:
- return rewriteValueMIPS64_OpLsh32x16(v, config)
+ return rewriteValueMIPS64_OpLsh32x16(v)
case OpLsh32x32:
- return rewriteValueMIPS64_OpLsh32x32(v, config)
+ return rewriteValueMIPS64_OpLsh32x32(v)
case OpLsh32x64:
- return rewriteValueMIPS64_OpLsh32x64(v, config)
+ return rewriteValueMIPS64_OpLsh32x64(v)
case OpLsh32x8:
- return rewriteValueMIPS64_OpLsh32x8(v, config)
+ return rewriteValueMIPS64_OpLsh32x8(v)
case OpLsh64x16:
- return rewriteValueMIPS64_OpLsh64x16(v, config)
+ return rewriteValueMIPS64_OpLsh64x16(v)
case OpLsh64x32:
- return rewriteValueMIPS64_OpLsh64x32(v, config)
+ return rewriteValueMIPS64_OpLsh64x32(v)
case OpLsh64x64:
- return rewriteValueMIPS64_OpLsh64x64(v, config)
+ return rewriteValueMIPS64_OpLsh64x64(v)
case OpLsh64x8:
- return rewriteValueMIPS64_OpLsh64x8(v, config)
+ return rewriteValueMIPS64_OpLsh64x8(v)
case OpLsh8x16:
- return rewriteValueMIPS64_OpLsh8x16(v, config)
+ return rewriteValueMIPS64_OpLsh8x16(v)
case OpLsh8x32:
- return rewriteValueMIPS64_OpLsh8x32(v, config)
+ return rewriteValueMIPS64_OpLsh8x32(v)
case OpLsh8x64:
- return rewriteValueMIPS64_OpLsh8x64(v, config)
+ return rewriteValueMIPS64_OpLsh8x64(v)
case OpLsh8x8:
- return rewriteValueMIPS64_OpLsh8x8(v, config)
+ return rewriteValueMIPS64_OpLsh8x8(v)
case OpMIPS64ADDV:
- return rewriteValueMIPS64_OpMIPS64ADDV(v, config)
+ return rewriteValueMIPS64_OpMIPS64ADDV(v)
case OpMIPS64ADDVconst:
- return rewriteValueMIPS64_OpMIPS64ADDVconst(v, config)
+ return rewriteValueMIPS64_OpMIPS64ADDVconst(v)
case OpMIPS64AND:
- return rewriteValueMIPS64_OpMIPS64AND(v, config)
+ return rewriteValueMIPS64_OpMIPS64AND(v)
case OpMIPS64ANDconst:
- return rewriteValueMIPS64_OpMIPS64ANDconst(v, config)
+ return rewriteValueMIPS64_OpMIPS64ANDconst(v)
case OpMIPS64MOVBUload:
- return rewriteValueMIPS64_OpMIPS64MOVBUload(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVBUload(v)
case OpMIPS64MOVBUreg:
- return rewriteValueMIPS64_OpMIPS64MOVBUreg(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVBUreg(v)
case OpMIPS64MOVBload:
- return rewriteValueMIPS64_OpMIPS64MOVBload(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVBload(v)
case OpMIPS64MOVBreg:
- return rewriteValueMIPS64_OpMIPS64MOVBreg(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVBreg(v)
case OpMIPS64MOVBstore:
- return rewriteValueMIPS64_OpMIPS64MOVBstore(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVBstore(v)
case OpMIPS64MOVBstorezero:
- return rewriteValueMIPS64_OpMIPS64MOVBstorezero(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVBstorezero(v)
case OpMIPS64MOVDload:
- return rewriteValueMIPS64_OpMIPS64MOVDload(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVDload(v)
case OpMIPS64MOVDstore:
- return rewriteValueMIPS64_OpMIPS64MOVDstore(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVDstore(v)
case OpMIPS64MOVFload:
- return rewriteValueMIPS64_OpMIPS64MOVFload(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVFload(v)
case OpMIPS64MOVFstore:
- return rewriteValueMIPS64_OpMIPS64MOVFstore(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVFstore(v)
case OpMIPS64MOVHUload:
- return rewriteValueMIPS64_OpMIPS64MOVHUload(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVHUload(v)
case OpMIPS64MOVHUreg:
- return rewriteValueMIPS64_OpMIPS64MOVHUreg(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVHUreg(v)
case OpMIPS64MOVHload:
- return rewriteValueMIPS64_OpMIPS64MOVHload(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVHload(v)
case OpMIPS64MOVHreg:
- return rewriteValueMIPS64_OpMIPS64MOVHreg(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVHreg(v)
case OpMIPS64MOVHstore:
- return rewriteValueMIPS64_OpMIPS64MOVHstore(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVHstore(v)
case OpMIPS64MOVHstorezero:
- return rewriteValueMIPS64_OpMIPS64MOVHstorezero(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVHstorezero(v)
case OpMIPS64MOVVload:
- return rewriteValueMIPS64_OpMIPS64MOVVload(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVVload(v)
case OpMIPS64MOVVreg:
- return rewriteValueMIPS64_OpMIPS64MOVVreg(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVVreg(v)
case OpMIPS64MOVVstore:
- return rewriteValueMIPS64_OpMIPS64MOVVstore(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVVstore(v)
case OpMIPS64MOVVstorezero:
- return rewriteValueMIPS64_OpMIPS64MOVVstorezero(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVVstorezero(v)
case OpMIPS64MOVWUload:
- return rewriteValueMIPS64_OpMIPS64MOVWUload(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVWUload(v)
case OpMIPS64MOVWUreg:
- return rewriteValueMIPS64_OpMIPS64MOVWUreg(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVWUreg(v)
case OpMIPS64MOVWload:
- return rewriteValueMIPS64_OpMIPS64MOVWload(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVWload(v)
case OpMIPS64MOVWreg:
- return rewriteValueMIPS64_OpMIPS64MOVWreg(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVWreg(v)
case OpMIPS64MOVWstore:
- return rewriteValueMIPS64_OpMIPS64MOVWstore(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVWstore(v)
case OpMIPS64MOVWstorezero:
- return rewriteValueMIPS64_OpMIPS64MOVWstorezero(v, config)
+ return rewriteValueMIPS64_OpMIPS64MOVWstorezero(v)
case OpMIPS64NEGV:
- return rewriteValueMIPS64_OpMIPS64NEGV(v, config)
+ return rewriteValueMIPS64_OpMIPS64NEGV(v)
case OpMIPS64NOR:
- return rewriteValueMIPS64_OpMIPS64NOR(v, config)
+ return rewriteValueMIPS64_OpMIPS64NOR(v)
case OpMIPS64NORconst:
- return rewriteValueMIPS64_OpMIPS64NORconst(v, config)
+ return rewriteValueMIPS64_OpMIPS64NORconst(v)
case OpMIPS64OR:
- return rewriteValueMIPS64_OpMIPS64OR(v, config)
+ return rewriteValueMIPS64_OpMIPS64OR(v)
case OpMIPS64ORconst:
- return rewriteValueMIPS64_OpMIPS64ORconst(v, config)
+ return rewriteValueMIPS64_OpMIPS64ORconst(v)
case OpMIPS64SGT:
- return rewriteValueMIPS64_OpMIPS64SGT(v, config)
+ return rewriteValueMIPS64_OpMIPS64SGT(v)
case OpMIPS64SGTU:
- return rewriteValueMIPS64_OpMIPS64SGTU(v, config)
+ return rewriteValueMIPS64_OpMIPS64SGTU(v)
case OpMIPS64SGTUconst:
- return rewriteValueMIPS64_OpMIPS64SGTUconst(v, config)
+ return rewriteValueMIPS64_OpMIPS64SGTUconst(v)
case OpMIPS64SGTconst:
- return rewriteValueMIPS64_OpMIPS64SGTconst(v, config)
+ return rewriteValueMIPS64_OpMIPS64SGTconst(v)
case OpMIPS64SLLV:
- return rewriteValueMIPS64_OpMIPS64SLLV(v, config)
+ return rewriteValueMIPS64_OpMIPS64SLLV(v)
case OpMIPS64SLLVconst:
- return rewriteValueMIPS64_OpMIPS64SLLVconst(v, config)
+ return rewriteValueMIPS64_OpMIPS64SLLVconst(v)
case OpMIPS64SRAV:
- return rewriteValueMIPS64_OpMIPS64SRAV(v, config)
+ return rewriteValueMIPS64_OpMIPS64SRAV(v)
case OpMIPS64SRAVconst:
- return rewriteValueMIPS64_OpMIPS64SRAVconst(v, config)
+ return rewriteValueMIPS64_OpMIPS64SRAVconst(v)
case OpMIPS64SRLV:
- return rewriteValueMIPS64_OpMIPS64SRLV(v, config)
+ return rewriteValueMIPS64_OpMIPS64SRLV(v)
case OpMIPS64SRLVconst:
- return rewriteValueMIPS64_OpMIPS64SRLVconst(v, config)
+ return rewriteValueMIPS64_OpMIPS64SRLVconst(v)
case OpMIPS64SUBV:
- return rewriteValueMIPS64_OpMIPS64SUBV(v, config)
+ return rewriteValueMIPS64_OpMIPS64SUBV(v)
case OpMIPS64SUBVconst:
- return rewriteValueMIPS64_OpMIPS64SUBVconst(v, config)
+ return rewriteValueMIPS64_OpMIPS64SUBVconst(v)
case OpMIPS64XOR:
- return rewriteValueMIPS64_OpMIPS64XOR(v, config)
+ return rewriteValueMIPS64_OpMIPS64XOR(v)
case OpMIPS64XORconst:
- return rewriteValueMIPS64_OpMIPS64XORconst(v, config)
+ return rewriteValueMIPS64_OpMIPS64XORconst(v)
case OpMod16:
- return rewriteValueMIPS64_OpMod16(v, config)
+ return rewriteValueMIPS64_OpMod16(v)
case OpMod16u:
- return rewriteValueMIPS64_OpMod16u(v, config)
+ return rewriteValueMIPS64_OpMod16u(v)
case OpMod32:
- return rewriteValueMIPS64_OpMod32(v, config)
+ return rewriteValueMIPS64_OpMod32(v)
case OpMod32u:
- return rewriteValueMIPS64_OpMod32u(v, config)
+ return rewriteValueMIPS64_OpMod32u(v)
case OpMod64:
- return rewriteValueMIPS64_OpMod64(v, config)
+ return rewriteValueMIPS64_OpMod64(v)
case OpMod64u:
- return rewriteValueMIPS64_OpMod64u(v, config)
+ return rewriteValueMIPS64_OpMod64u(v)
case OpMod8:
- return rewriteValueMIPS64_OpMod8(v, config)
+ return rewriteValueMIPS64_OpMod8(v)
case OpMod8u:
- return rewriteValueMIPS64_OpMod8u(v, config)
+ return rewriteValueMIPS64_OpMod8u(v)
case OpMove:
- return rewriteValueMIPS64_OpMove(v, config)
+ return rewriteValueMIPS64_OpMove(v)
case OpMul16:
- return rewriteValueMIPS64_OpMul16(v, config)
+ return rewriteValueMIPS64_OpMul16(v)
case OpMul32:
- return rewriteValueMIPS64_OpMul32(v, config)
+ return rewriteValueMIPS64_OpMul32(v)
case OpMul32F:
- return rewriteValueMIPS64_OpMul32F(v, config)
+ return rewriteValueMIPS64_OpMul32F(v)
case OpMul64:
- return rewriteValueMIPS64_OpMul64(v, config)
+ return rewriteValueMIPS64_OpMul64(v)
case OpMul64F:
- return rewriteValueMIPS64_OpMul64F(v, config)
+ return rewriteValueMIPS64_OpMul64F(v)
case OpMul8:
- return rewriteValueMIPS64_OpMul8(v, config)
+ return rewriteValueMIPS64_OpMul8(v)
case OpNeg16:
- return rewriteValueMIPS64_OpNeg16(v, config)
+ return rewriteValueMIPS64_OpNeg16(v)
case OpNeg32:
- return rewriteValueMIPS64_OpNeg32(v, config)
+ return rewriteValueMIPS64_OpNeg32(v)
case OpNeg32F:
- return rewriteValueMIPS64_OpNeg32F(v, config)
+ return rewriteValueMIPS64_OpNeg32F(v)
case OpNeg64:
- return rewriteValueMIPS64_OpNeg64(v, config)
+ return rewriteValueMIPS64_OpNeg64(v)
case OpNeg64F:
- return rewriteValueMIPS64_OpNeg64F(v, config)
+ return rewriteValueMIPS64_OpNeg64F(v)
case OpNeg8:
- return rewriteValueMIPS64_OpNeg8(v, config)
+ return rewriteValueMIPS64_OpNeg8(v)
case OpNeq16:
- return rewriteValueMIPS64_OpNeq16(v, config)
+ return rewriteValueMIPS64_OpNeq16(v)
case OpNeq32:
- return rewriteValueMIPS64_OpNeq32(v, config)
+ return rewriteValueMIPS64_OpNeq32(v)
case OpNeq32F:
- return rewriteValueMIPS64_OpNeq32F(v, config)
+ return rewriteValueMIPS64_OpNeq32F(v)
case OpNeq64:
- return rewriteValueMIPS64_OpNeq64(v, config)
+ return rewriteValueMIPS64_OpNeq64(v)
case OpNeq64F:
- return rewriteValueMIPS64_OpNeq64F(v, config)
+ return rewriteValueMIPS64_OpNeq64F(v)
case OpNeq8:
- return rewriteValueMIPS64_OpNeq8(v, config)
+ return rewriteValueMIPS64_OpNeq8(v)
case OpNeqB:
- return rewriteValueMIPS64_OpNeqB(v, config)
+ return rewriteValueMIPS64_OpNeqB(v)
case OpNeqPtr:
- return rewriteValueMIPS64_OpNeqPtr(v, config)
+ return rewriteValueMIPS64_OpNeqPtr(v)
case OpNilCheck:
- return rewriteValueMIPS64_OpNilCheck(v, config)
+ return rewriteValueMIPS64_OpNilCheck(v)
case OpNot:
- return rewriteValueMIPS64_OpNot(v, config)
+ return rewriteValueMIPS64_OpNot(v)
case OpOffPtr:
- return rewriteValueMIPS64_OpOffPtr(v, config)
+ return rewriteValueMIPS64_OpOffPtr(v)
case OpOr16:
- return rewriteValueMIPS64_OpOr16(v, config)
+ return rewriteValueMIPS64_OpOr16(v)
case OpOr32:
- return rewriteValueMIPS64_OpOr32(v, config)
+ return rewriteValueMIPS64_OpOr32(v)
case OpOr64:
- return rewriteValueMIPS64_OpOr64(v, config)
+ return rewriteValueMIPS64_OpOr64(v)
case OpOr8:
- return rewriteValueMIPS64_OpOr8(v, config)
+ return rewriteValueMIPS64_OpOr8(v)
case OpOrB:
- return rewriteValueMIPS64_OpOrB(v, config)
+ return rewriteValueMIPS64_OpOrB(v)
case OpRound32F:
- return rewriteValueMIPS64_OpRound32F(v, config)
+ return rewriteValueMIPS64_OpRound32F(v)
case OpRound64F:
- return rewriteValueMIPS64_OpRound64F(v, config)
+ return rewriteValueMIPS64_OpRound64F(v)
case OpRsh16Ux16:
- return rewriteValueMIPS64_OpRsh16Ux16(v, config)
+ return rewriteValueMIPS64_OpRsh16Ux16(v)
case OpRsh16Ux32:
- return rewriteValueMIPS64_OpRsh16Ux32(v, config)
+ return rewriteValueMIPS64_OpRsh16Ux32(v)
case OpRsh16Ux64:
- return rewriteValueMIPS64_OpRsh16Ux64(v, config)
+ return rewriteValueMIPS64_OpRsh16Ux64(v)
case OpRsh16Ux8:
- return rewriteValueMIPS64_OpRsh16Ux8(v, config)
+ return rewriteValueMIPS64_OpRsh16Ux8(v)
case OpRsh16x16:
- return rewriteValueMIPS64_OpRsh16x16(v, config)
+ return rewriteValueMIPS64_OpRsh16x16(v)
case OpRsh16x32:
- return rewriteValueMIPS64_OpRsh16x32(v, config)
+ return rewriteValueMIPS64_OpRsh16x32(v)
case OpRsh16x64:
- return rewriteValueMIPS64_OpRsh16x64(v, config)
+ return rewriteValueMIPS64_OpRsh16x64(v)
case OpRsh16x8:
- return rewriteValueMIPS64_OpRsh16x8(v, config)
+ return rewriteValueMIPS64_OpRsh16x8(v)
case OpRsh32Ux16:
- return rewriteValueMIPS64_OpRsh32Ux16(v, config)
+ return rewriteValueMIPS64_OpRsh32Ux16(v)
case OpRsh32Ux32:
- return rewriteValueMIPS64_OpRsh32Ux32(v, config)
+ return rewriteValueMIPS64_OpRsh32Ux32(v)
case OpRsh32Ux64:
- return rewriteValueMIPS64_OpRsh32Ux64(v, config)
+ return rewriteValueMIPS64_OpRsh32Ux64(v)
case OpRsh32Ux8:
- return rewriteValueMIPS64_OpRsh32Ux8(v, config)
+ return rewriteValueMIPS64_OpRsh32Ux8(v)
case OpRsh32x16:
- return rewriteValueMIPS64_OpRsh32x16(v, config)
+ return rewriteValueMIPS64_OpRsh32x16(v)
case OpRsh32x32:
- return rewriteValueMIPS64_OpRsh32x32(v, config)
+ return rewriteValueMIPS64_OpRsh32x32(v)
case OpRsh32x64:
- return rewriteValueMIPS64_OpRsh32x64(v, config)
+ return rewriteValueMIPS64_OpRsh32x64(v)
case OpRsh32x8:
- return rewriteValueMIPS64_OpRsh32x8(v, config)
+ return rewriteValueMIPS64_OpRsh32x8(v)
case OpRsh64Ux16:
- return rewriteValueMIPS64_OpRsh64Ux16(v, config)
+ return rewriteValueMIPS64_OpRsh64Ux16(v)
case OpRsh64Ux32:
- return rewriteValueMIPS64_OpRsh64Ux32(v, config)
+ return rewriteValueMIPS64_OpRsh64Ux32(v)
case OpRsh64Ux64:
- return rewriteValueMIPS64_OpRsh64Ux64(v, config)
+ return rewriteValueMIPS64_OpRsh64Ux64(v)
case OpRsh64Ux8:
- return rewriteValueMIPS64_OpRsh64Ux8(v, config)
+ return rewriteValueMIPS64_OpRsh64Ux8(v)
case OpRsh64x16:
- return rewriteValueMIPS64_OpRsh64x16(v, config)
+ return rewriteValueMIPS64_OpRsh64x16(v)
case OpRsh64x32:
- return rewriteValueMIPS64_OpRsh64x32(v, config)
+ return rewriteValueMIPS64_OpRsh64x32(v)
case OpRsh64x64:
- return rewriteValueMIPS64_OpRsh64x64(v, config)
+ return rewriteValueMIPS64_OpRsh64x64(v)
case OpRsh64x8:
- return rewriteValueMIPS64_OpRsh64x8(v, config)
+ return rewriteValueMIPS64_OpRsh64x8(v)
case OpRsh8Ux16:
- return rewriteValueMIPS64_OpRsh8Ux16(v, config)
+ return rewriteValueMIPS64_OpRsh8Ux16(v)
case OpRsh8Ux32:
- return rewriteValueMIPS64_OpRsh8Ux32(v, config)
+ return rewriteValueMIPS64_OpRsh8Ux32(v)
case OpRsh8Ux64:
- return rewriteValueMIPS64_OpRsh8Ux64(v, config)
+ return rewriteValueMIPS64_OpRsh8Ux64(v)
case OpRsh8Ux8:
- return rewriteValueMIPS64_OpRsh8Ux8(v, config)
+ return rewriteValueMIPS64_OpRsh8Ux8(v)
case OpRsh8x16:
- return rewriteValueMIPS64_OpRsh8x16(v, config)
+ return rewriteValueMIPS64_OpRsh8x16(v)
case OpRsh8x32:
- return rewriteValueMIPS64_OpRsh8x32(v, config)
+ return rewriteValueMIPS64_OpRsh8x32(v)
case OpRsh8x64:
- return rewriteValueMIPS64_OpRsh8x64(v, config)
+ return rewriteValueMIPS64_OpRsh8x64(v)
case OpRsh8x8:
- return rewriteValueMIPS64_OpRsh8x8(v, config)
+ return rewriteValueMIPS64_OpRsh8x8(v)
case OpSelect0:
- return rewriteValueMIPS64_OpSelect0(v, config)
+ return rewriteValueMIPS64_OpSelect0(v)
case OpSelect1:
- return rewriteValueMIPS64_OpSelect1(v, config)
+ return rewriteValueMIPS64_OpSelect1(v)
case OpSignExt16to32:
- return rewriteValueMIPS64_OpSignExt16to32(v, config)
+ return rewriteValueMIPS64_OpSignExt16to32(v)
case OpSignExt16to64:
- return rewriteValueMIPS64_OpSignExt16to64(v, config)
+ return rewriteValueMIPS64_OpSignExt16to64(v)
case OpSignExt32to64:
- return rewriteValueMIPS64_OpSignExt32to64(v, config)
+ return rewriteValueMIPS64_OpSignExt32to64(v)
case OpSignExt8to16:
- return rewriteValueMIPS64_OpSignExt8to16(v, config)
+ return rewriteValueMIPS64_OpSignExt8to16(v)
case OpSignExt8to32:
- return rewriteValueMIPS64_OpSignExt8to32(v, config)
+ return rewriteValueMIPS64_OpSignExt8to32(v)
case OpSignExt8to64:
- return rewriteValueMIPS64_OpSignExt8to64(v, config)
+ return rewriteValueMIPS64_OpSignExt8to64(v)
case OpSlicemask:
- return rewriteValueMIPS64_OpSlicemask(v, config)
+ return rewriteValueMIPS64_OpSlicemask(v)
case OpStaticCall:
- return rewriteValueMIPS64_OpStaticCall(v, config)
+ return rewriteValueMIPS64_OpStaticCall(v)
case OpStore:
- return rewriteValueMIPS64_OpStore(v, config)
+ return rewriteValueMIPS64_OpStore(v)
case OpSub16:
- return rewriteValueMIPS64_OpSub16(v, config)
+ return rewriteValueMIPS64_OpSub16(v)
case OpSub32:
- return rewriteValueMIPS64_OpSub32(v, config)
+ return rewriteValueMIPS64_OpSub32(v)
case OpSub32F:
- return rewriteValueMIPS64_OpSub32F(v, config)
+ return rewriteValueMIPS64_OpSub32F(v)
case OpSub64:
- return rewriteValueMIPS64_OpSub64(v, config)
+ return rewriteValueMIPS64_OpSub64(v)
case OpSub64F:
- return rewriteValueMIPS64_OpSub64F(v, config)
+ return rewriteValueMIPS64_OpSub64F(v)
case OpSub8:
- return rewriteValueMIPS64_OpSub8(v, config)
+ return rewriteValueMIPS64_OpSub8(v)
case OpSubPtr:
- return rewriteValueMIPS64_OpSubPtr(v, config)
+ return rewriteValueMIPS64_OpSubPtr(v)
case OpTrunc16to8:
- return rewriteValueMIPS64_OpTrunc16to8(v, config)
+ return rewriteValueMIPS64_OpTrunc16to8(v)
case OpTrunc32to16:
- return rewriteValueMIPS64_OpTrunc32to16(v, config)
+ return rewriteValueMIPS64_OpTrunc32to16(v)
case OpTrunc32to8:
- return rewriteValueMIPS64_OpTrunc32to8(v, config)
+ return rewriteValueMIPS64_OpTrunc32to8(v)
case OpTrunc64to16:
- return rewriteValueMIPS64_OpTrunc64to16(v, config)
+ return rewriteValueMIPS64_OpTrunc64to16(v)
case OpTrunc64to32:
- return rewriteValueMIPS64_OpTrunc64to32(v, config)
+ return rewriteValueMIPS64_OpTrunc64to32(v)
case OpTrunc64to8:
- return rewriteValueMIPS64_OpTrunc64to8(v, config)
+ return rewriteValueMIPS64_OpTrunc64to8(v)
case OpXor16:
- return rewriteValueMIPS64_OpXor16(v, config)
+ return rewriteValueMIPS64_OpXor16(v)
case OpXor32:
- return rewriteValueMIPS64_OpXor32(v, config)
+ return rewriteValueMIPS64_OpXor32(v)
case OpXor64:
- return rewriteValueMIPS64_OpXor64(v, config)
+ return rewriteValueMIPS64_OpXor64(v)
case OpXor8:
- return rewriteValueMIPS64_OpXor8(v, config)
+ return rewriteValueMIPS64_OpXor8(v)
case OpZero:
- return rewriteValueMIPS64_OpZero(v, config)
+ return rewriteValueMIPS64_OpZero(v)
case OpZeroExt16to32:
- return rewriteValueMIPS64_OpZeroExt16to32(v, config)
+ return rewriteValueMIPS64_OpZeroExt16to32(v)
case OpZeroExt16to64:
- return rewriteValueMIPS64_OpZeroExt16to64(v, config)
+ return rewriteValueMIPS64_OpZeroExt16to64(v)
case OpZeroExt32to64:
- return rewriteValueMIPS64_OpZeroExt32to64(v, config)
+ return rewriteValueMIPS64_OpZeroExt32to64(v)
case OpZeroExt8to16:
- return rewriteValueMIPS64_OpZeroExt8to16(v, config)
+ return rewriteValueMIPS64_OpZeroExt8to16(v)
case OpZeroExt8to32:
- return rewriteValueMIPS64_OpZeroExt8to32(v, config)
+ return rewriteValueMIPS64_OpZeroExt8to32(v)
case OpZeroExt8to64:
- return rewriteValueMIPS64_OpZeroExt8to64(v, config)
+ return rewriteValueMIPS64_OpZeroExt8to64(v)
}
return false
}
-func rewriteValueMIPS64_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAdd16(v *Value) bool {
// match: (Add16 x y)
// cond:
// result: (ADDV x y)
return true
}
}
-func rewriteValueMIPS64_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAdd32(v *Value) bool {
// match: (Add32 x y)
// cond:
// result: (ADDV x y)
return true
}
}
-func rewriteValueMIPS64_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAdd32F(v *Value) bool {
// match: (Add32F x y)
// cond:
// result: (ADDF x y)
return true
}
}
-func rewriteValueMIPS64_OpAdd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAdd64(v *Value) bool {
// match: (Add64 x y)
// cond:
// result: (ADDV x y)
return true
}
}
-func rewriteValueMIPS64_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAdd64F(v *Value) bool {
// match: (Add64F x y)
// cond:
// result: (ADDD x y)
return true
}
}
-func rewriteValueMIPS64_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAdd8(v *Value) bool {
// match: (Add8 x y)
// cond:
// result: (ADDV x y)
return true
}
}
-func rewriteValueMIPS64_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAddPtr(v *Value) bool {
// match: (AddPtr x y)
// cond:
// result: (ADDV x y)
return true
}
}
-func rewriteValueMIPS64_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAddr(v *Value) bool {
// match: (Addr {sym} base)
// cond:
// result: (MOVVaddr {sym} base)
return true
}
}
-func rewriteValueMIPS64_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAnd16(v *Value) bool {
// match: (And16 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueMIPS64_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAnd32(v *Value) bool {
// match: (And32 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueMIPS64_OpAnd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAnd64(v *Value) bool {
// match: (And64 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueMIPS64_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAnd8(v *Value) bool {
// match: (And8 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueMIPS64_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpAndB(v *Value) bool {
// match: (AndB x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueMIPS64_OpAvg64u(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpAvg64u(v *Value) bool {
b := v.Block
_ = b
// match: (Avg64u <t> x y)
return true
}
}
-func rewriteValueMIPS64_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpClosureCall(v *Value) bool {
// match: (ClosureCall [argwid] entry closure mem)
// cond:
// result: (CALLclosure [argwid] entry closure mem)
return true
}
}
-func rewriteValueMIPS64_OpCom16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpCom16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Com16 x)
// cond:
// result: (NOR (MOVVconst [0]) x)
for {
x := v.Args[0]
v.reset(OpMIPS64NOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(x)
return true
}
}
-func rewriteValueMIPS64_OpCom32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpCom32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Com32 x)
// cond:
// result: (NOR (MOVVconst [0]) x)
for {
x := v.Args[0]
v.reset(OpMIPS64NOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(x)
return true
}
}
-func rewriteValueMIPS64_OpCom64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpCom64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Com64 x)
// cond:
// result: (NOR (MOVVconst [0]) x)
for {
x := v.Args[0]
v.reset(OpMIPS64NOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(x)
return true
}
}
-func rewriteValueMIPS64_OpCom8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpCom8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Com8 x)
// cond:
// result: (NOR (MOVVconst [0]) x)
for {
x := v.Args[0]
v.reset(OpMIPS64NOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(x)
return true
}
}
-func rewriteValueMIPS64_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpConst16(v *Value) bool {
// match: (Const16 [val])
// cond:
// result: (MOVVconst [val])
return true
}
}
-func rewriteValueMIPS64_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpConst32(v *Value) bool {
// match: (Const32 [val])
// cond:
// result: (MOVVconst [val])
return true
}
}
-func rewriteValueMIPS64_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpConst32F(v *Value) bool {
// match: (Const32F [val])
// cond:
// result: (MOVFconst [val])
return true
}
}
-func rewriteValueMIPS64_OpConst64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpConst64(v *Value) bool {
// match: (Const64 [val])
// cond:
// result: (MOVVconst [val])
return true
}
}
-func rewriteValueMIPS64_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpConst64F(v *Value) bool {
// match: (Const64F [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValueMIPS64_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpConst8(v *Value) bool {
// match: (Const8 [val])
// cond:
// result: (MOVVconst [val])
return true
}
}
-func rewriteValueMIPS64_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpConstBool(v *Value) bool {
// match: (ConstBool [b])
// cond:
// result: (MOVVconst [b])
return true
}
}
-func rewriteValueMIPS64_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpConstNil(v *Value) bool {
// match: (ConstNil)
// cond:
// result: (MOVVconst [0])
return true
}
}
-func rewriteValueMIPS64_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpConvert(v *Value) bool {
// match: (Convert x mem)
// cond:
// result: (MOVVconvert x mem)
return true
}
}
-func rewriteValueMIPS64_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpCvt32Fto32(v *Value) bool {
// match: (Cvt32Fto32 x)
// cond:
// result: (TRUNCFW x)
return true
}
}
-func rewriteValueMIPS64_OpCvt32Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpCvt32Fto64(v *Value) bool {
// match: (Cvt32Fto64 x)
// cond:
// result: (TRUNCFV x)
return true
}
}
-func rewriteValueMIPS64_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpCvt32Fto64F(v *Value) bool {
// match: (Cvt32Fto64F x)
// cond:
// result: (MOVFD x)
return true
}
}
-func rewriteValueMIPS64_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpCvt32to32F(v *Value) bool {
// match: (Cvt32to32F x)
// cond:
// result: (MOVWF x)
return true
}
}
-func rewriteValueMIPS64_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpCvt32to64F(v *Value) bool {
// match: (Cvt32to64F x)
// cond:
// result: (MOVWD x)
return true
}
}
-func rewriteValueMIPS64_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpCvt64Fto32(v *Value) bool {
// match: (Cvt64Fto32 x)
// cond:
// result: (TRUNCDW x)
return true
}
}
-func rewriteValueMIPS64_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpCvt64Fto32F(v *Value) bool {
// match: (Cvt64Fto32F x)
// cond:
// result: (MOVDF x)
return true
}
}
-func rewriteValueMIPS64_OpCvt64Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpCvt64Fto64(v *Value) bool {
// match: (Cvt64Fto64 x)
// cond:
// result: (TRUNCDV x)
return true
}
}
-func rewriteValueMIPS64_OpCvt64to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpCvt64to32F(v *Value) bool {
// match: (Cvt64to32F x)
// cond:
// result: (MOVVF x)
return true
}
}
-func rewriteValueMIPS64_OpCvt64to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpCvt64to64F(v *Value) bool {
// match: (Cvt64to64F x)
// cond:
// result: (MOVVD x)
return true
}
}
-func rewriteValueMIPS64_OpDiv16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpDiv16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16 x y)
// cond:
// result: (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
- v1 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpDiv16u(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpDiv16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16u x y)
// cond:
// result: (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpDiv32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpDiv32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div32 x y)
// cond:
// result: (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
- v1 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpDiv32F(v *Value) bool {
// match: (Div32F x y)
// cond:
// result: (DIVF x y)
return true
}
}
-func rewriteValueMIPS64_OpDiv32u(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpDiv32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div32u x y)
// cond:
// result: (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpDiv64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpDiv64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div64 x y)
// cond:
// result: (Select1 (DIVV x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpDiv64F(v *Value) bool {
// match: (Div64F x y)
// cond:
// result: (DIVD x y)
return true
}
}
-func rewriteValueMIPS64_OpDiv64u(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpDiv64u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div64u x y)
// cond:
// result: (Select1 (DIVVU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpDiv8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpDiv8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8 x y)
// cond:
// result: (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
- v1 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpDiv8u(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpDiv8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8u x y)
// cond:
// result: (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpEq16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpEq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq16 x y)
// cond:
// result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64XOR, config.fe.TypeUInt64())
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpEq32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpEq32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq32 x y)
// cond:
// result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64XOR, config.fe.TypeUInt64())
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpEq32F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpEq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32F x y)
return true
}
}
-func rewriteValueMIPS64_OpEq64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpEq64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq64 x y)
// cond:
// result: (SGTU (MOVVconst [1]) (XOR x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64XOR, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, fe.TypeUInt64())
v1.AddArg(x)
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpEq64F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpEq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq64F x y)
return true
}
}
-func rewriteValueMIPS64_OpEq8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpEq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq8 x y)
// cond:
// result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64XOR, config.fe.TypeUInt64())
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpEqB(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpEqB(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (EqB x y)
// cond:
- // result: (XOR (MOVVconst [1]) (XOR <config.fe.TypeBool()> x y))
+ // result: (XOR (MOVVconst [1]) (XOR <fe.TypeBool()> x y))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64XOR, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, fe.TypeBool())
v1.AddArg(x)
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpEqPtr(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpEqPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (EqPtr x y)
// cond:
// result: (SGTU (MOVVconst [1]) (XOR x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64XOR, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, fe.TypeUInt64())
v1.AddArg(x)
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGeq16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq16 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 y) (SignExt16to64 x)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v3.AddArg(x)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGeq16U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGeq16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq16U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(x)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGeq32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGeq32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq32 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 y) (SignExt32to64 x)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v3.AddArg(x)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGeq32F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32F x y)
return true
}
}
-func rewriteValueMIPS64_OpGeq32U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGeq32U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq32U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(x)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGeq64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGeq64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq64 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT y x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, fe.TypeBool())
v1.AddArg(y)
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGeq64F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64F x y)
return true
}
}
-func rewriteValueMIPS64_OpGeq64U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGeq64U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq64U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU y x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
v1.AddArg(y)
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGeq8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq8 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 y) (SignExt8to64 x)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v3.AddArg(x)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGeq8U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGeq8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq8U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v2.AddArg(y)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(x)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpGetClosurePtr(v *Value) bool {
// match: (GetClosurePtr)
// cond:
// result: (LoweredGetClosurePtr)
return true
}
}
-func rewriteValueMIPS64_OpGreater16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGreater16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater16 x y)
// cond:
// result: (SGT (SignExt16to64 x) (SignExt16to64 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGT)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGreater16U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGreater16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater16U x y)
// cond:
// result: (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGreater32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGreater32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater32 x y)
// cond:
// result: (SGT (SignExt32to64 x) (SignExt32to64 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGT)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGreater32F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGreater32F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32F x y)
return true
}
}
-func rewriteValueMIPS64_OpGreater32U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGreater32U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater32U x y)
// cond:
// result: (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGreater64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpGreater64(v *Value) bool {
// match: (Greater64 x y)
// cond:
// result: (SGT x y)
return true
}
}
-func rewriteValueMIPS64_OpGreater64F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGreater64F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64F x y)
return true
}
}
-func rewriteValueMIPS64_OpGreater64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpGreater64U(v *Value) bool {
// match: (Greater64U x y)
// cond:
// result: (SGTU x y)
return true
}
}
-func rewriteValueMIPS64_OpGreater8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGreater8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater8 x y)
// cond:
// result: (SGT (SignExt8to64 x) (SignExt8to64 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGT)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpGreater8U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpGreater8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater8U x y)
// cond:
// result: (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpHmul32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpHmul32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Hmul32 x y)
// cond:
- // result: (SRAVconst (Select1 <config.fe.TypeInt64()> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+ // result: (SRAVconst (Select1 <fe.TypeInt64()> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAVconst)
v.AuxInt = 32
- v0 := b.NewValue0(v.Pos, OpSelect1, config.fe.TypeInt64())
- v1 := b.NewValue0(v.Pos, OpMIPS64MULV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
- v2 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSelect1, fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULV, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpHmul32u(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpHmul32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Hmul32u x y)
// cond:
- // result: (SRLVconst (Select1 <config.fe.TypeUInt64()> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+ // result: (SRLVconst (Select1 <fe.TypeUInt64()> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRLVconst)
v.AuxInt = 32
- v0 := b.NewValue0(v.Pos, OpSelect1, config.fe.TypeUInt64())
- v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpSelect1, fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpHmul64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpHmul64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Hmul64 x y)
// cond:
// result: (Select0 (MULV x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64MULV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULV, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpHmul64u(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpHmul64u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Hmul64u x y)
// cond:
// result: (Select0 (MULVU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpInterCall(v *Value) bool {
// match: (InterCall [argwid] entry mem)
// cond:
// result: (CALLinter [argwid] entry mem)
return true
}
}
-func rewriteValueMIPS64_OpIsInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpIsInBounds(v *Value) bool {
// match: (IsInBounds idx len)
// cond:
// result: (SGTU len idx)
return true
}
}
-func rewriteValueMIPS64_OpIsNonNil(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpIsNonNil(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (IsNonNil ptr)
// cond:
// result: (SGTU ptr (MOVVconst [0]))
ptr := v.Args[0]
v.reset(OpMIPS64SGTU)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpIsSliceInBounds(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpIsSliceInBounds(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (IsSliceInBounds idx len)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU idx len))
idx := v.Args[0]
len := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
v1.AddArg(idx)
v1.AddArg(len)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLeq16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq16 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLeq16U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLeq16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq16U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLeq32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLeq32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq32 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLeq32F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32F x y)
return true
}
}
-func rewriteValueMIPS64_OpLeq32U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLeq32U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq32U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLeq64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLeq64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq64 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, fe.TypeBool())
v1.AddArg(x)
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLeq64F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64F x y)
return true
}
}
-func rewriteValueMIPS64_OpLeq64U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLeq64U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq64U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
v1.AddArg(x)
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLeq8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq8 x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGT, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLeq8U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLeq8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq8U x y)
// cond:
// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64XOR)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 1
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLess16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLess16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less16 x y)
// cond:
// result: (SGT (SignExt16to64 y) (SignExt16to64 x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGT)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLess16U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLess16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less16U x y)
// cond:
// result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLess32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLess32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less32 x y)
// cond:
// result: (SGT (SignExt32to64 y) (SignExt32to64 x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGT)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLess32F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLess32F(v *Value) bool {
b := v.Block
_ = b
// match: (Less32F x y)
return true
}
}
-func rewriteValueMIPS64_OpLess32U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLess32U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less32U x y)
// cond:
// result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLess64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpLess64(v *Value) bool {
// match: (Less64 x y)
// cond:
// result: (SGT y x)
return true
}
}
-func rewriteValueMIPS64_OpLess64F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLess64F(v *Value) bool {
b := v.Block
_ = b
// match: (Less64F x y)
return true
}
}
-func rewriteValueMIPS64_OpLess64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpLess64U(v *Value) bool {
// match: (Less64U x y)
// cond:
// result: (SGTU y x)
return true
}
}
-func rewriteValueMIPS64_OpLess8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLess8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less8 x y)
// cond:
// result: (SGT (SignExt8to64 y) (SignExt8to64 x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGT)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLess8U(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLess8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less8U x y)
// cond:
// result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpLoad(v *Value) bool {
// match: (Load <t> ptr mem)
// cond: t.IsBoolean()
// result: (MOVBUload ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpLsh16x16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh16x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh16x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpLsh16x64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh16x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
return true
}
}
-func rewriteValueMIPS64_OpLsh16x8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh16x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpLsh32x16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh32x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpLsh32x32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh32x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpLsh32x64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh32x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
return true
}
}
-func rewriteValueMIPS64_OpLsh32x8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh32x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpLsh64x16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh64x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpLsh64x32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh64x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpLsh64x64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh64x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
return true
}
}
-func rewriteValueMIPS64_OpLsh64x8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh64x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh8x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpLsh8x32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh8x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpLsh8x64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh8x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
return true
}
}
-func rewriteValueMIPS64_OpLsh8x8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpLsh8x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpMIPS64ADDV(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool {
// match: (ADDV (MOVVconst [c]) x)
// cond: is32Bit(c)
// result: (ADDVconst [c] x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool {
// match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr))
// cond:
// result: (MOVVaddr [off1+off2] {sym} ptr)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64AND(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool {
// match: (AND (MOVVconst [c]) x)
// cond: is32Bit(c)
// result: (ANDconst [c] x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value) bool {
// match: (ANDconst [0] _)
// cond:
// result: (MOVVconst [0])
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool {
// match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVBUload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value) bool {
// match: (MOVBUreg x:(MOVBUload _ _))
// cond:
// result: (MOVVreg x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool {
// match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVBload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value) bool {
// match: (MOVBreg x:(MOVBload _ _))
// cond:
// result: (MOVVreg x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool {
// match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVBstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool {
// match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVBstorezero [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool {
// match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVDload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool {
// match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVDstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool {
// match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVFload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value) bool {
// match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVFstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value) bool {
// match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVHUload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool {
// match: (MOVHUreg x:(MOVBUload _ _))
// cond:
// result: (MOVVreg x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool {
// match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVHload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool {
// match: (MOVHreg x:(MOVBload _ _))
// cond:
// result: (MOVVreg x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool {
// match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVHstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool {
// match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVHstorezero [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool {
// match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVVload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVVreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVVreg(v *Value) bool {
// match: (MOVVreg x)
// cond: x.Uses == 1
// result: (MOVVnop x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool {
// match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVVstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool {
// match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVVstorezero [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool {
// match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVWUload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool {
// match: (MOVWUreg x:(MOVBUload _ _))
// cond:
// result: (MOVVreg x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool {
// match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVWload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool {
// match: (MOVWreg x:(MOVBload _ _))
// cond:
// result: (MOVVreg x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool {
// match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2)
// result: (MOVWstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool {
// match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
// cond: is32Bit(off1+off2)
// result: (MOVWstorezero [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64NEGV(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64NEGV(v *Value) bool {
// match: (NEGV (MOVVconst [c]))
// cond:
// result: (MOVVconst [-c])
}
return false
}
-func rewriteValueMIPS64_OpMIPS64NOR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64NOR(v *Value) bool {
// match: (NOR (MOVVconst [c]) x)
// cond: is32Bit(c)
// result: (NORconst [c] x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64NORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64NORconst(v *Value) bool {
// match: (NORconst [c] (MOVVconst [d]))
// cond:
// result: (MOVVconst [^(c|d)])
}
return false
}
-func rewriteValueMIPS64_OpMIPS64OR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool {
// match: (OR (MOVVconst [c]) x)
// cond: is32Bit(c)
// result: (ORconst [c] x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64ORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool {
// match: (ORconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueMIPS64_OpMIPS64SGT(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64SGT(v *Value) bool {
// match: (SGT (MOVVconst [c]) x)
// cond: is32Bit(c)
// result: (SGTconst [c] x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64SGTU(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64SGTU(v *Value) bool {
// match: (SGTU (MOVVconst [c]) x)
// cond: is32Bit(c)
// result: (SGTUconst [c] x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64SGTUconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64SGTUconst(v *Value) bool {
// match: (SGTUconst [c] (MOVVconst [d]))
// cond: uint64(c)>uint64(d)
// result: (MOVVconst [1])
}
return false
}
-func rewriteValueMIPS64_OpMIPS64SGTconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64SGTconst(v *Value) bool {
// match: (SGTconst [c] (MOVVconst [d]))
// cond: int64(c)>int64(d)
// result: (MOVVconst [1])
}
return false
}
-func rewriteValueMIPS64_OpMIPS64SLLV(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64SLLV(v *Value) bool {
// match: (SLLV _ (MOVVconst [c]))
// cond: uint64(c)>=64
// result: (MOVVconst [0])
}
return false
}
-func rewriteValueMIPS64_OpMIPS64SLLVconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64SLLVconst(v *Value) bool {
// match: (SLLVconst [c] (MOVVconst [d]))
// cond:
// result: (MOVVconst [int64(d)<<uint64(c)])
}
return false
}
-func rewriteValueMIPS64_OpMIPS64SRAV(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64SRAV(v *Value) bool {
// match: (SRAV x (MOVVconst [c]))
// cond: uint64(c)>=64
// result: (SRAVconst x [63])
}
return false
}
-func rewriteValueMIPS64_OpMIPS64SRAVconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64SRAVconst(v *Value) bool {
// match: (SRAVconst [c] (MOVVconst [d]))
// cond:
// result: (MOVVconst [int64(d)>>uint64(c)])
}
return false
}
-func rewriteValueMIPS64_OpMIPS64SRLV(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64SRLV(v *Value) bool {
// match: (SRLV _ (MOVVconst [c]))
// cond: uint64(c)>=64
// result: (MOVVconst [0])
}
return false
}
-func rewriteValueMIPS64_OpMIPS64SRLVconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64SRLVconst(v *Value) bool {
// match: (SRLVconst [c] (MOVVconst [d]))
// cond:
// result: (MOVVconst [int64(uint64(d)>>uint64(c))])
}
return false
}
-func rewriteValueMIPS64_OpMIPS64SUBV(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64SUBV(v *Value) bool {
// match: (SUBV x (MOVVconst [c]))
// cond: is32Bit(c)
// result: (SUBVconst [c] x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool {
// match: (SUBVconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueMIPS64_OpMIPS64XOR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool {
// match: (XOR (MOVVconst [c]) x)
// cond: is32Bit(c)
// result: (XORconst [c] x)
}
return false
}
-func rewriteValueMIPS64_OpMIPS64XORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMIPS64XORconst(v *Value) bool {
// match: (XORconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueMIPS64_OpMod16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMod16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16 x y)
// cond:
// result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
- v1 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpMod16u(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMod16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16u x y)
// cond:
// result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
- v1 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpMod32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMod32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod32 x y)
// cond:
// result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
- v1 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpMod32u(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMod32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod32u x y)
// cond:
// result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpMod64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMod64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod64 x y)
// cond:
// result: (Select0 (DIVV x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpMod64u(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMod64u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod64u x y)
// cond:
// result: (Select0 (DIVVU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpMod8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMod8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8 x y)
// cond:
// result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
- v1 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpMod8u(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMod8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8u x y)
// cond:
// result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
- v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpMove(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMove(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Move [0] _ _ mem)
// cond:
// result: mem
mem := v.Args[2]
v.reset(OpMIPS64MOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, config.fe.TypeInt8())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, fe.TypeInt8())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
}
v.reset(OpMIPS64MOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, config.fe.TypeInt16())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, fe.TypeInt16())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpMIPS64MOVBstore)
v.AuxInt = 1
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, config.fe.TypeInt8())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, fe.TypeInt8())
v0.AuxInt = 1
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, config.fe.TypeInt8())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, fe.TypeInt8())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
}
v.reset(OpMIPS64MOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, fe.TypeInt32())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpMIPS64MOVHstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, config.fe.TypeInt16())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, fe.TypeInt16())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, config.fe.TypeInt16())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, fe.TypeInt16())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpMIPS64MOVBstore)
v.AuxInt = 3
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, config.fe.TypeInt8())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, fe.TypeInt8())
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, config.fe.TypeInt8())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, fe.TypeInt8())
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
v3.AuxInt = 1
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, config.fe.TypeInt8())
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, fe.TypeInt8())
v4.AuxInt = 1
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPS64MOVBload, config.fe.TypeInt8())
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVBload, fe.TypeInt8())
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
}
v.reset(OpMIPS64MOVVstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, fe.TypeUInt64())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpMIPS64MOVWstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, fe.TypeInt32())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, fe.TypeInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpMIPS64MOVHstore)
v.AuxInt = 6
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, config.fe.TypeInt16())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, fe.TypeInt16())
v0.AuxInt = 6
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, config.fe.TypeInt16())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, fe.TypeInt16())
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
v3.AuxInt = 2
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, config.fe.TypeInt16())
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, fe.TypeInt16())
v4.AuxInt = 2
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpMIPS64MOVHload, config.fe.TypeInt16())
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVHload, fe.TypeInt16())
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
v.reset(OpMIPS64MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, config.fe.TypeInt8())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, fe.TypeInt8())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
v1.AuxInt = 1
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, config.fe.TypeInt8())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, fe.TypeInt8())
v2.AuxInt = 1
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, config.fe.TypeInt8())
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, fe.TypeInt8())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v.reset(OpMIPS64MOVHstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, config.fe.TypeInt16())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, fe.TypeInt16())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, config.fe.TypeInt16())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, fe.TypeInt16())
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, config.fe.TypeInt16())
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, fe.TypeInt16())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v.reset(OpMIPS64MOVWstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, fe.TypeInt32())
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, fe.TypeInt32())
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVWload, config.fe.TypeInt32())
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVWload, fe.TypeInt32())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v.reset(OpMIPS64MOVVstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, fe.TypeUInt64())
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, fe.TypeUInt64())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpMIPS64MOVVstore)
v.AuxInt = 16
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, fe.TypeUInt64())
v0.AuxInt = 16
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, TypeMem)
v1.AuxInt = 8
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, fe.TypeUInt64())
v2.AuxInt = 8
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVload, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVload, fe.TypeUInt64())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
}
return false
}
-func rewriteValueMIPS64_OpMul16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMul16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mul16 x y)
// cond:
// result: (Select1 (MULVU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpMul32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMul32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mul32 x y)
// cond:
// result: (Select1 (MULVU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMul32F(v *Value) bool {
// match: (Mul32F x y)
// cond:
// result: (MULF x y)
return true
}
}
-func rewriteValueMIPS64_OpMul64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMul64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mul64 x y)
// cond:
// result: (Select1 (MULVU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpMul64F(v *Value) bool {
// match: (Mul64F x y)
// cond:
// result: (MULD x y)
return true
}
}
-func rewriteValueMIPS64_OpMul8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpMul8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mul8 x y)
// cond:
// result: (Select1 (MULVU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpNeg16(v *Value) bool {
// match: (Neg16 x)
// cond:
// result: (NEGV x)
return true
}
}
-func rewriteValueMIPS64_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpNeg32(v *Value) bool {
// match: (Neg32 x)
// cond:
// result: (NEGV x)
return true
}
}
-func rewriteValueMIPS64_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpNeg32F(v *Value) bool {
// match: (Neg32F x)
// cond:
// result: (NEGF x)
return true
}
}
-func rewriteValueMIPS64_OpNeg64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpNeg64(v *Value) bool {
// match: (Neg64 x)
// cond:
// result: (NEGV x)
return true
}
}
-func rewriteValueMIPS64_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpNeg64F(v *Value) bool {
// match: (Neg64F x)
// cond:
// result: (NEGD x)
return true
}
}
-func rewriteValueMIPS64_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpNeg8(v *Value) bool {
// match: (Neg8 x)
// cond:
// result: (NEGV x)
return true
}
}
-func rewriteValueMIPS64_OpNeq16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpNeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq16 x y)
// cond:
// result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0]))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64XOR, config.fe.TypeUInt64())
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v3.AuxInt = 0
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS64_OpNeq32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpNeq32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq32 x y)
// cond:
// result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64XOR, config.fe.TypeUInt64())
- v1 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v3.AuxInt = 0
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS64_OpNeq32F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpNeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32F x y)
return true
}
}
-func rewriteValueMIPS64_OpNeq64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpNeq64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq64 x y)
// cond:
// result: (SGTU (XOR x y) (MOVVconst [0]))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64XOR, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, fe.TypeUInt64())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v1.AuxInt = 0
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpNeq64F(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpNeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq64F x y)
return true
}
}
-func rewriteValueMIPS64_OpNeq8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpNeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq8 x y)
// cond:
// result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64XOR, config.fe.TypeUInt64())
- v1 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v3.AuxInt = 0
v.AddArg(v3)
return true
}
}
-func rewriteValueMIPS64_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpNeqB(v *Value) bool {
// match: (NeqB x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueMIPS64_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpNeqPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (NeqPtr x y)
// cond:
// result: (SGTU (XOR x y) (MOVVconst [0]))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SGTU)
- v0 := b.NewValue0(v.Pos, OpMIPS64XOR, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, fe.TypeUInt64())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v1.AuxInt = 0
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpNilCheck(v *Value) bool {
// match: (NilCheck ptr mem)
// cond:
// result: (LoweredNilCheck ptr mem)
return true
}
}
-func rewriteValueMIPS64_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpNot(v *Value) bool {
// match: (Not x)
// cond:
// result: (XORconst [1] x)
return true
}
}
-func rewriteValueMIPS64_OpOffPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpOffPtr(v *Value) bool {
// match: (OffPtr [off] ptr:(SP))
// cond:
// result: (MOVVaddr [off] ptr)
return true
}
}
-func rewriteValueMIPS64_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpOr16(v *Value) bool {
// match: (Or16 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueMIPS64_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpOr32(v *Value) bool {
// match: (Or32 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueMIPS64_OpOr64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpOr64(v *Value) bool {
// match: (Or64 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueMIPS64_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpOr8(v *Value) bool {
// match: (Or8 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueMIPS64_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpOrB(v *Value) bool {
// match: (OrB x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueMIPS64_OpRound32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpRound32F(v *Value) bool {
// match: (Round32F x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS64_OpRound64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpRound64F(v *Value) bool {
// match: (Round64F x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS64_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh16Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh16Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh16Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(x)
v3.AddArg(v4)
v3.AddArg(y)
return true
}
}
-func rewriteValueMIPS64_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh16Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh16x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x16 <t> x y)
// cond:
- // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh16x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x32 <t> x y)
// cond:
- // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh16x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x64 <t> x y)
// cond:
- // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
v3.AddArg(y)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 63
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValueMIPS64_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh16x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x8 <t> x y)
// cond:
- // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt16to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh32Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh32Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpRsh32Ux64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh32Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(x)
v3.AddArg(v4)
v3.AddArg(y)
return true
}
}
-func rewriteValueMIPS64_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh32Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh32x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x16 <t> x y)
// cond:
- // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh32x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x32 <t> x y)
// cond:
- // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpRsh32x64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh32x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x64 <t> x y)
// cond:
- // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
v3.AddArg(y)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 63
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValueMIPS64_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh32x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x8 <t> x y)
// cond:
- // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpRsh64Ux16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpRsh64Ux32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpRsh64Ux64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> x y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> x y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
return true
}
}
-func rewriteValueMIPS64_OpRsh64Ux8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpRsh64x16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x16 <t> x y)
// cond:
- // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 63
v2.AddArg(v4)
v1.AddArg(v2)
v0.AddArg(v1)
- v5 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v5.AddArg(y)
v0.AddArg(v5)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpRsh64x32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x32 <t> x y)
// cond:
- // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 63
v2.AddArg(v4)
v1.AddArg(v2)
v0.AddArg(v1)
- v5 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v5.AddArg(y)
v0.AddArg(v5)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpRsh64x64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x64 <t> x y)
// cond:
- // result: (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
for {
t := v.Type
x := v.Args[0]
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
v2.AddArg(y)
- v3 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v3.AuxInt = 63
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueMIPS64_OpRsh64x8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh64x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x8 <t> x y)
// cond:
- // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 63
v2.AddArg(v4)
v1.AddArg(v2)
v0.AddArg(v1)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(y)
v0.AddArg(v5)
v.AddArg(v0)
return true
}
}
-func rewriteValueMIPS64_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux16 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux32 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux64 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
v1.AddArg(y)
v0.AddArg(v1)
v.AddArg(v0)
v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(x)
v3.AddArg(v4)
v3.AddArg(y)
return true
}
}
-func rewriteValueMIPS64_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux8 <t> x y)
// cond:
- // result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ // result: (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64AND)
v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 64
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
- v5 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v6.AddArg(y)
v4.AddArg(v6)
v.AddArg(v4)
return true
}
}
-func rewriteValueMIPS64_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x16 <t> x y)
// cond:
- // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x32 <t> x y)
// cond:
- // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x64 <t> x y)
// cond:
- // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
v3.AddArg(y)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 63
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValueMIPS64_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpRsh8x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x8 <t> x y)
// cond:
- // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
for {
t := v.Type
x := v.Args[0]
y := v.Args[1]
v.reset(OpMIPS64SRAV)
- v0 := b.NewValue0(v.Pos, OpSignExt8to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
- v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 63
v3.AddArg(v5)
v2.AddArg(v3)
v1.AddArg(v2)
- v6 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v6.AddArg(y)
v1.AddArg(v6)
v.AddArg(v1)
return true
}
}
-func rewriteValueMIPS64_OpSelect0(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSelect0(v *Value) bool {
// match: (Select0 (DIVVU _ (MOVVconst [1])))
// cond:
// result: (MOVVconst [0])
}
return false
}
-func rewriteValueMIPS64_OpSelect1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSelect1(v *Value) bool {
// match: (Select1 (MULVU x (MOVVconst [-1])))
// cond:
// result: (NEGV x)
}
return false
}
-func rewriteValueMIPS64_OpSignExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSignExt16to32(v *Value) bool {
// match: (SignExt16to32 x)
// cond:
// result: (MOVHreg x)
return true
}
}
-func rewriteValueMIPS64_OpSignExt16to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSignExt16to64(v *Value) bool {
// match: (SignExt16to64 x)
// cond:
// result: (MOVHreg x)
return true
}
}
-func rewriteValueMIPS64_OpSignExt32to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSignExt32to64(v *Value) bool {
// match: (SignExt32to64 x)
// cond:
// result: (MOVWreg x)
return true
}
}
-func rewriteValueMIPS64_OpSignExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSignExt8to16(v *Value) bool {
// match: (SignExt8to16 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueMIPS64_OpSignExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSignExt8to32(v *Value) bool {
// match: (SignExt8to32 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueMIPS64_OpSignExt8to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSignExt8to64(v *Value) bool {
// match: (SignExt8to64 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueMIPS64_OpSlicemask(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpSlicemask(v *Value) bool {
b := v.Block
_ = b
// match: (Slicemask <t> x)
return true
}
}
-func rewriteValueMIPS64_OpStaticCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpStaticCall(v *Value) bool {
// match: (StaticCall [argwid] {target} mem)
// cond:
// result: (CALLstatic [argwid] {target} mem)
return true
}
}
-func rewriteValueMIPS64_OpStore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpStore(v *Value) bool {
// match: (Store {t} ptr val mem)
// cond: t.(Type).Size() == 1
// result: (MOVBstore ptr val mem)
}
return false
}
-func rewriteValueMIPS64_OpSub16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSub16(v *Value) bool {
// match: (Sub16 x y)
// cond:
// result: (SUBV x y)
return true
}
}
-func rewriteValueMIPS64_OpSub32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSub32(v *Value) bool {
// match: (Sub32 x y)
// cond:
// result: (SUBV x y)
return true
}
}
-func rewriteValueMIPS64_OpSub32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSub32F(v *Value) bool {
// match: (Sub32F x y)
// cond:
// result: (SUBF x y)
return true
}
}
-func rewriteValueMIPS64_OpSub64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSub64(v *Value) bool {
// match: (Sub64 x y)
// cond:
// result: (SUBV x y)
return true
}
}
-func rewriteValueMIPS64_OpSub64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSub64F(v *Value) bool {
// match: (Sub64F x y)
// cond:
// result: (SUBD x y)
return true
}
}
-func rewriteValueMIPS64_OpSub8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSub8(v *Value) bool {
// match: (Sub8 x y)
// cond:
// result: (SUBV x y)
return true
}
}
-func rewriteValueMIPS64_OpSubPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpSubPtr(v *Value) bool {
// match: (SubPtr x y)
// cond:
// result: (SUBV x y)
return true
}
}
-func rewriteValueMIPS64_OpTrunc16to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpTrunc16to8(v *Value) bool {
// match: (Trunc16to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS64_OpTrunc32to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpTrunc32to16(v *Value) bool {
// match: (Trunc32to16 x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS64_OpTrunc32to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpTrunc32to8(v *Value) bool {
// match: (Trunc32to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS64_OpTrunc64to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpTrunc64to16(v *Value) bool {
// match: (Trunc64to16 x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS64_OpTrunc64to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpTrunc64to32(v *Value) bool {
// match: (Trunc64to32 x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS64_OpTrunc64to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpTrunc64to8(v *Value) bool {
// match: (Trunc64to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueMIPS64_OpXor16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpXor16(v *Value) bool {
// match: (Xor16 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueMIPS64_OpXor32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpXor32(v *Value) bool {
// match: (Xor32 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueMIPS64_OpXor64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpXor64(v *Value) bool {
// match: (Xor64 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueMIPS64_OpXor8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpXor8(v *Value) bool {
// match: (Xor8 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueMIPS64_OpZero(v *Value, config *Config) bool {
+func rewriteValueMIPS64_OpZero(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Zero [0] _ mem)
// cond:
// result: mem
mem := v.Args[1]
v.reset(OpMIPS64MOVBstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
}
v.reset(OpMIPS64MOVHstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
v.reset(OpMIPS64MOVBstore)
v.AuxInt = 1
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
}
v.reset(OpMIPS64MOVWstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
v.reset(OpMIPS64MOVHstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
v.reset(OpMIPS64MOVBstore)
v.AuxInt = 3
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
v1.AuxInt = 2
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
v3.AuxInt = 1
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v4.AuxInt = 0
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
v5.AuxInt = 0
v5.AddArg(ptr)
- v6 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v6.AuxInt = 0
v5.AddArg(v6)
v5.AddArg(mem)
}
v.reset(OpMIPS64MOVVstore)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)
v.reset(OpMIPS64MOVWstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
v.reset(OpMIPS64MOVHstore)
v.AuxInt = 6
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
v1.AuxInt = 4
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
v3.AuxInt = 2
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v4.AuxInt = 0
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
v5.AuxInt = 0
v5.AddArg(ptr)
- v6 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v6.AuxInt = 0
v5.AddArg(v6)
v5.AddArg(mem)
v.reset(OpMIPS64MOVBstore)
v.AuxInt = 2
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
v1.AuxInt = 1
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
v.reset(OpMIPS64MOVHstore)
v.AuxInt = 4
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
v1.AuxInt = 2
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
v.reset(OpMIPS64MOVWstore)
v.AuxInt = 8
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, TypeMem)
v1.AuxInt = 4
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
v.reset(OpMIPS64MOVVstore)
v.AuxInt = 8
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, TypeMem)
v1.AuxInt = 0
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)
v.reset(OpMIPS64MOVVstore)
v.AuxInt = 16
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, TypeMem)
v1.AuxInt = 8
v1.AddArg(ptr)
- v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v2.AuxInt = 0
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, TypeMem)
v3.AuxInt = 0
v3.AddArg(ptr)
- v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, fe.TypeUInt64())
v4.AuxInt = 0
v3.AddArg(v4)
v3.AddArg(mem)
}
return false
}
-func rewriteValueMIPS64_OpZeroExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpZeroExt16to32(v *Value) bool {
// match: (ZeroExt16to32 x)
// cond:
// result: (MOVHUreg x)
return true
}
}
-func rewriteValueMIPS64_OpZeroExt16to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpZeroExt16to64(v *Value) bool {
// match: (ZeroExt16to64 x)
// cond:
// result: (MOVHUreg x)
return true
}
}
-func rewriteValueMIPS64_OpZeroExt32to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpZeroExt32to64(v *Value) bool {
// match: (ZeroExt32to64 x)
// cond:
// result: (MOVWUreg x)
return true
}
}
-func rewriteValueMIPS64_OpZeroExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpZeroExt8to16(v *Value) bool {
// match: (ZeroExt8to16 x)
// cond:
// result: (MOVBUreg x)
return true
}
}
-func rewriteValueMIPS64_OpZeroExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpZeroExt8to32(v *Value) bool {
// match: (ZeroExt8to32 x)
// cond:
// result: (MOVBUreg x)
return true
}
}
-func rewriteValueMIPS64_OpZeroExt8to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueMIPS64_OpZeroExt8to64(v *Value) bool {
// match: (ZeroExt8to64 x)
// cond:
// result: (MOVBUreg x)
return true
}
}
-func rewriteBlockMIPS64(b *Block, config *Config) bool {
+func rewriteBlockMIPS64(b *Block) bool {
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
switch b.Kind {
case BlockMIPS64EQ:
// match: (EQ (FPFlagTrue cmp) yes no)
import "math"
var _ = math.MinInt8 // in case not otherwise used
-func rewriteValuePPC64(v *Value, config *Config) bool {
+func rewriteValuePPC64(v *Value) bool {
switch v.Op {
case OpAdd16:
- return rewriteValuePPC64_OpAdd16(v, config)
+ return rewriteValuePPC64_OpAdd16(v)
case OpAdd32:
- return rewriteValuePPC64_OpAdd32(v, config)
+ return rewriteValuePPC64_OpAdd32(v)
case OpAdd32F:
- return rewriteValuePPC64_OpAdd32F(v, config)
+ return rewriteValuePPC64_OpAdd32F(v)
case OpAdd64:
- return rewriteValuePPC64_OpAdd64(v, config)
+ return rewriteValuePPC64_OpAdd64(v)
case OpAdd64F:
- return rewriteValuePPC64_OpAdd64F(v, config)
+ return rewriteValuePPC64_OpAdd64F(v)
case OpAdd8:
- return rewriteValuePPC64_OpAdd8(v, config)
+ return rewriteValuePPC64_OpAdd8(v)
case OpAddPtr:
- return rewriteValuePPC64_OpAddPtr(v, config)
+ return rewriteValuePPC64_OpAddPtr(v)
case OpAddr:
- return rewriteValuePPC64_OpAddr(v, config)
+ return rewriteValuePPC64_OpAddr(v)
case OpAnd16:
- return rewriteValuePPC64_OpAnd16(v, config)
+ return rewriteValuePPC64_OpAnd16(v)
case OpAnd32:
- return rewriteValuePPC64_OpAnd32(v, config)
+ return rewriteValuePPC64_OpAnd32(v)
case OpAnd64:
- return rewriteValuePPC64_OpAnd64(v, config)
+ return rewriteValuePPC64_OpAnd64(v)
case OpAnd8:
- return rewriteValuePPC64_OpAnd8(v, config)
+ return rewriteValuePPC64_OpAnd8(v)
case OpAndB:
- return rewriteValuePPC64_OpAndB(v, config)
+ return rewriteValuePPC64_OpAndB(v)
case OpAtomicAdd32:
- return rewriteValuePPC64_OpAtomicAdd32(v, config)
+ return rewriteValuePPC64_OpAtomicAdd32(v)
case OpAtomicAdd64:
- return rewriteValuePPC64_OpAtomicAdd64(v, config)
+ return rewriteValuePPC64_OpAtomicAdd64(v)
case OpAtomicAnd8:
- return rewriteValuePPC64_OpAtomicAnd8(v, config)
+ return rewriteValuePPC64_OpAtomicAnd8(v)
case OpAtomicCompareAndSwap32:
- return rewriteValuePPC64_OpAtomicCompareAndSwap32(v, config)
+ return rewriteValuePPC64_OpAtomicCompareAndSwap32(v)
case OpAtomicCompareAndSwap64:
- return rewriteValuePPC64_OpAtomicCompareAndSwap64(v, config)
+ return rewriteValuePPC64_OpAtomicCompareAndSwap64(v)
case OpAtomicExchange32:
- return rewriteValuePPC64_OpAtomicExchange32(v, config)
+ return rewriteValuePPC64_OpAtomicExchange32(v)
case OpAtomicExchange64:
- return rewriteValuePPC64_OpAtomicExchange64(v, config)
+ return rewriteValuePPC64_OpAtomicExchange64(v)
case OpAtomicLoad32:
- return rewriteValuePPC64_OpAtomicLoad32(v, config)
+ return rewriteValuePPC64_OpAtomicLoad32(v)
case OpAtomicLoad64:
- return rewriteValuePPC64_OpAtomicLoad64(v, config)
+ return rewriteValuePPC64_OpAtomicLoad64(v)
case OpAtomicLoadPtr:
- return rewriteValuePPC64_OpAtomicLoadPtr(v, config)
+ return rewriteValuePPC64_OpAtomicLoadPtr(v)
case OpAtomicOr8:
- return rewriteValuePPC64_OpAtomicOr8(v, config)
+ return rewriteValuePPC64_OpAtomicOr8(v)
case OpAtomicStore32:
- return rewriteValuePPC64_OpAtomicStore32(v, config)
+ return rewriteValuePPC64_OpAtomicStore32(v)
case OpAtomicStore64:
- return rewriteValuePPC64_OpAtomicStore64(v, config)
+ return rewriteValuePPC64_OpAtomicStore64(v)
case OpAvg64u:
- return rewriteValuePPC64_OpAvg64u(v, config)
+ return rewriteValuePPC64_OpAvg64u(v)
case OpClosureCall:
- return rewriteValuePPC64_OpClosureCall(v, config)
+ return rewriteValuePPC64_OpClosureCall(v)
case OpCom16:
- return rewriteValuePPC64_OpCom16(v, config)
+ return rewriteValuePPC64_OpCom16(v)
case OpCom32:
- return rewriteValuePPC64_OpCom32(v, config)
+ return rewriteValuePPC64_OpCom32(v)
case OpCom64:
- return rewriteValuePPC64_OpCom64(v, config)
+ return rewriteValuePPC64_OpCom64(v)
case OpCom8:
- return rewriteValuePPC64_OpCom8(v, config)
+ return rewriteValuePPC64_OpCom8(v)
case OpConst16:
- return rewriteValuePPC64_OpConst16(v, config)
+ return rewriteValuePPC64_OpConst16(v)
case OpConst32:
- return rewriteValuePPC64_OpConst32(v, config)
+ return rewriteValuePPC64_OpConst32(v)
case OpConst32F:
- return rewriteValuePPC64_OpConst32F(v, config)
+ return rewriteValuePPC64_OpConst32F(v)
case OpConst64:
- return rewriteValuePPC64_OpConst64(v, config)
+ return rewriteValuePPC64_OpConst64(v)
case OpConst64F:
- return rewriteValuePPC64_OpConst64F(v, config)
+ return rewriteValuePPC64_OpConst64F(v)
case OpConst8:
- return rewriteValuePPC64_OpConst8(v, config)
+ return rewriteValuePPC64_OpConst8(v)
case OpConstBool:
- return rewriteValuePPC64_OpConstBool(v, config)
+ return rewriteValuePPC64_OpConstBool(v)
case OpConstNil:
- return rewriteValuePPC64_OpConstNil(v, config)
+ return rewriteValuePPC64_OpConstNil(v)
case OpConvert:
- return rewriteValuePPC64_OpConvert(v, config)
+ return rewriteValuePPC64_OpConvert(v)
case OpCvt32Fto32:
- return rewriteValuePPC64_OpCvt32Fto32(v, config)
+ return rewriteValuePPC64_OpCvt32Fto32(v)
case OpCvt32Fto64:
- return rewriteValuePPC64_OpCvt32Fto64(v, config)
+ return rewriteValuePPC64_OpCvt32Fto64(v)
case OpCvt32Fto64F:
- return rewriteValuePPC64_OpCvt32Fto64F(v, config)
+ return rewriteValuePPC64_OpCvt32Fto64F(v)
case OpCvt32to32F:
- return rewriteValuePPC64_OpCvt32to32F(v, config)
+ return rewriteValuePPC64_OpCvt32to32F(v)
case OpCvt32to64F:
- return rewriteValuePPC64_OpCvt32to64F(v, config)
+ return rewriteValuePPC64_OpCvt32to64F(v)
case OpCvt64Fto32:
- return rewriteValuePPC64_OpCvt64Fto32(v, config)
+ return rewriteValuePPC64_OpCvt64Fto32(v)
case OpCvt64Fto32F:
- return rewriteValuePPC64_OpCvt64Fto32F(v, config)
+ return rewriteValuePPC64_OpCvt64Fto32F(v)
case OpCvt64Fto64:
- return rewriteValuePPC64_OpCvt64Fto64(v, config)
+ return rewriteValuePPC64_OpCvt64Fto64(v)
case OpCvt64to32F:
- return rewriteValuePPC64_OpCvt64to32F(v, config)
+ return rewriteValuePPC64_OpCvt64to32F(v)
case OpCvt64to64F:
- return rewriteValuePPC64_OpCvt64to64F(v, config)
+ return rewriteValuePPC64_OpCvt64to64F(v)
case OpDiv16:
- return rewriteValuePPC64_OpDiv16(v, config)
+ return rewriteValuePPC64_OpDiv16(v)
case OpDiv16u:
- return rewriteValuePPC64_OpDiv16u(v, config)
+ return rewriteValuePPC64_OpDiv16u(v)
case OpDiv32:
- return rewriteValuePPC64_OpDiv32(v, config)
+ return rewriteValuePPC64_OpDiv32(v)
case OpDiv32F:
- return rewriteValuePPC64_OpDiv32F(v, config)
+ return rewriteValuePPC64_OpDiv32F(v)
case OpDiv32u:
- return rewriteValuePPC64_OpDiv32u(v, config)
+ return rewriteValuePPC64_OpDiv32u(v)
case OpDiv64:
- return rewriteValuePPC64_OpDiv64(v, config)
+ return rewriteValuePPC64_OpDiv64(v)
case OpDiv64F:
- return rewriteValuePPC64_OpDiv64F(v, config)
+ return rewriteValuePPC64_OpDiv64F(v)
case OpDiv64u:
- return rewriteValuePPC64_OpDiv64u(v, config)
+ return rewriteValuePPC64_OpDiv64u(v)
case OpDiv8:
- return rewriteValuePPC64_OpDiv8(v, config)
+ return rewriteValuePPC64_OpDiv8(v)
case OpDiv8u:
- return rewriteValuePPC64_OpDiv8u(v, config)
+ return rewriteValuePPC64_OpDiv8u(v)
case OpEq16:
- return rewriteValuePPC64_OpEq16(v, config)
+ return rewriteValuePPC64_OpEq16(v)
case OpEq32:
- return rewriteValuePPC64_OpEq32(v, config)
+ return rewriteValuePPC64_OpEq32(v)
case OpEq32F:
- return rewriteValuePPC64_OpEq32F(v, config)
+ return rewriteValuePPC64_OpEq32F(v)
case OpEq64:
- return rewriteValuePPC64_OpEq64(v, config)
+ return rewriteValuePPC64_OpEq64(v)
case OpEq64F:
- return rewriteValuePPC64_OpEq64F(v, config)
+ return rewriteValuePPC64_OpEq64F(v)
case OpEq8:
- return rewriteValuePPC64_OpEq8(v, config)
+ return rewriteValuePPC64_OpEq8(v)
case OpEqB:
- return rewriteValuePPC64_OpEqB(v, config)
+ return rewriteValuePPC64_OpEqB(v)
case OpEqPtr:
- return rewriteValuePPC64_OpEqPtr(v, config)
+ return rewriteValuePPC64_OpEqPtr(v)
case OpGeq16:
- return rewriteValuePPC64_OpGeq16(v, config)
+ return rewriteValuePPC64_OpGeq16(v)
case OpGeq16U:
- return rewriteValuePPC64_OpGeq16U(v, config)
+ return rewriteValuePPC64_OpGeq16U(v)
case OpGeq32:
- return rewriteValuePPC64_OpGeq32(v, config)
+ return rewriteValuePPC64_OpGeq32(v)
case OpGeq32F:
- return rewriteValuePPC64_OpGeq32F(v, config)
+ return rewriteValuePPC64_OpGeq32F(v)
case OpGeq32U:
- return rewriteValuePPC64_OpGeq32U(v, config)
+ return rewriteValuePPC64_OpGeq32U(v)
case OpGeq64:
- return rewriteValuePPC64_OpGeq64(v, config)
+ return rewriteValuePPC64_OpGeq64(v)
case OpGeq64F:
- return rewriteValuePPC64_OpGeq64F(v, config)
+ return rewriteValuePPC64_OpGeq64F(v)
case OpGeq64U:
- return rewriteValuePPC64_OpGeq64U(v, config)
+ return rewriteValuePPC64_OpGeq64U(v)
case OpGeq8:
- return rewriteValuePPC64_OpGeq8(v, config)
+ return rewriteValuePPC64_OpGeq8(v)
case OpGeq8U:
- return rewriteValuePPC64_OpGeq8U(v, config)
+ return rewriteValuePPC64_OpGeq8U(v)
case OpGetClosurePtr:
- return rewriteValuePPC64_OpGetClosurePtr(v, config)
+ return rewriteValuePPC64_OpGetClosurePtr(v)
case OpGreater16:
- return rewriteValuePPC64_OpGreater16(v, config)
+ return rewriteValuePPC64_OpGreater16(v)
case OpGreater16U:
- return rewriteValuePPC64_OpGreater16U(v, config)
+ return rewriteValuePPC64_OpGreater16U(v)
case OpGreater32:
- return rewriteValuePPC64_OpGreater32(v, config)
+ return rewriteValuePPC64_OpGreater32(v)
case OpGreater32F:
- return rewriteValuePPC64_OpGreater32F(v, config)
+ return rewriteValuePPC64_OpGreater32F(v)
case OpGreater32U:
- return rewriteValuePPC64_OpGreater32U(v, config)
+ return rewriteValuePPC64_OpGreater32U(v)
case OpGreater64:
- return rewriteValuePPC64_OpGreater64(v, config)
+ return rewriteValuePPC64_OpGreater64(v)
case OpGreater64F:
- return rewriteValuePPC64_OpGreater64F(v, config)
+ return rewriteValuePPC64_OpGreater64F(v)
case OpGreater64U:
- return rewriteValuePPC64_OpGreater64U(v, config)
+ return rewriteValuePPC64_OpGreater64U(v)
case OpGreater8:
- return rewriteValuePPC64_OpGreater8(v, config)
+ return rewriteValuePPC64_OpGreater8(v)
case OpGreater8U:
- return rewriteValuePPC64_OpGreater8U(v, config)
+ return rewriteValuePPC64_OpGreater8U(v)
case OpHmul32:
- return rewriteValuePPC64_OpHmul32(v, config)
+ return rewriteValuePPC64_OpHmul32(v)
case OpHmul32u:
- return rewriteValuePPC64_OpHmul32u(v, config)
+ return rewriteValuePPC64_OpHmul32u(v)
case OpHmul64:
- return rewriteValuePPC64_OpHmul64(v, config)
+ return rewriteValuePPC64_OpHmul64(v)
case OpHmul64u:
- return rewriteValuePPC64_OpHmul64u(v, config)
+ return rewriteValuePPC64_OpHmul64u(v)
case OpInterCall:
- return rewriteValuePPC64_OpInterCall(v, config)
+ return rewriteValuePPC64_OpInterCall(v)
case OpIsInBounds:
- return rewriteValuePPC64_OpIsInBounds(v, config)
+ return rewriteValuePPC64_OpIsInBounds(v)
case OpIsNonNil:
- return rewriteValuePPC64_OpIsNonNil(v, config)
+ return rewriteValuePPC64_OpIsNonNil(v)
case OpIsSliceInBounds:
- return rewriteValuePPC64_OpIsSliceInBounds(v, config)
+ return rewriteValuePPC64_OpIsSliceInBounds(v)
case OpLeq16:
- return rewriteValuePPC64_OpLeq16(v, config)
+ return rewriteValuePPC64_OpLeq16(v)
case OpLeq16U:
- return rewriteValuePPC64_OpLeq16U(v, config)
+ return rewriteValuePPC64_OpLeq16U(v)
case OpLeq32:
- return rewriteValuePPC64_OpLeq32(v, config)
+ return rewriteValuePPC64_OpLeq32(v)
case OpLeq32F:
- return rewriteValuePPC64_OpLeq32F(v, config)
+ return rewriteValuePPC64_OpLeq32F(v)
case OpLeq32U:
- return rewriteValuePPC64_OpLeq32U(v, config)
+ return rewriteValuePPC64_OpLeq32U(v)
case OpLeq64:
- return rewriteValuePPC64_OpLeq64(v, config)
+ return rewriteValuePPC64_OpLeq64(v)
case OpLeq64F:
- return rewriteValuePPC64_OpLeq64F(v, config)
+ return rewriteValuePPC64_OpLeq64F(v)
case OpLeq64U:
- return rewriteValuePPC64_OpLeq64U(v, config)
+ return rewriteValuePPC64_OpLeq64U(v)
case OpLeq8:
- return rewriteValuePPC64_OpLeq8(v, config)
+ return rewriteValuePPC64_OpLeq8(v)
case OpLeq8U:
- return rewriteValuePPC64_OpLeq8U(v, config)
+ return rewriteValuePPC64_OpLeq8U(v)
case OpLess16:
- return rewriteValuePPC64_OpLess16(v, config)
+ return rewriteValuePPC64_OpLess16(v)
case OpLess16U:
- return rewriteValuePPC64_OpLess16U(v, config)
+ return rewriteValuePPC64_OpLess16U(v)
case OpLess32:
- return rewriteValuePPC64_OpLess32(v, config)
+ return rewriteValuePPC64_OpLess32(v)
case OpLess32F:
- return rewriteValuePPC64_OpLess32F(v, config)
+ return rewriteValuePPC64_OpLess32F(v)
case OpLess32U:
- return rewriteValuePPC64_OpLess32U(v, config)
+ return rewriteValuePPC64_OpLess32U(v)
case OpLess64:
- return rewriteValuePPC64_OpLess64(v, config)
+ return rewriteValuePPC64_OpLess64(v)
case OpLess64F:
- return rewriteValuePPC64_OpLess64F(v, config)
+ return rewriteValuePPC64_OpLess64F(v)
case OpLess64U:
- return rewriteValuePPC64_OpLess64U(v, config)
+ return rewriteValuePPC64_OpLess64U(v)
case OpLess8:
- return rewriteValuePPC64_OpLess8(v, config)
+ return rewriteValuePPC64_OpLess8(v)
case OpLess8U:
- return rewriteValuePPC64_OpLess8U(v, config)
+ return rewriteValuePPC64_OpLess8U(v)
case OpLoad:
- return rewriteValuePPC64_OpLoad(v, config)
+ return rewriteValuePPC64_OpLoad(v)
case OpLsh16x16:
- return rewriteValuePPC64_OpLsh16x16(v, config)
+ return rewriteValuePPC64_OpLsh16x16(v)
case OpLsh16x32:
- return rewriteValuePPC64_OpLsh16x32(v, config)
+ return rewriteValuePPC64_OpLsh16x32(v)
case OpLsh16x64:
- return rewriteValuePPC64_OpLsh16x64(v, config)
+ return rewriteValuePPC64_OpLsh16x64(v)
case OpLsh16x8:
- return rewriteValuePPC64_OpLsh16x8(v, config)
+ return rewriteValuePPC64_OpLsh16x8(v)
case OpLsh32x16:
- return rewriteValuePPC64_OpLsh32x16(v, config)
+ return rewriteValuePPC64_OpLsh32x16(v)
case OpLsh32x32:
- return rewriteValuePPC64_OpLsh32x32(v, config)
+ return rewriteValuePPC64_OpLsh32x32(v)
case OpLsh32x64:
- return rewriteValuePPC64_OpLsh32x64(v, config)
+ return rewriteValuePPC64_OpLsh32x64(v)
case OpLsh32x8:
- return rewriteValuePPC64_OpLsh32x8(v, config)
+ return rewriteValuePPC64_OpLsh32x8(v)
case OpLsh64x16:
- return rewriteValuePPC64_OpLsh64x16(v, config)
+ return rewriteValuePPC64_OpLsh64x16(v)
case OpLsh64x32:
- return rewriteValuePPC64_OpLsh64x32(v, config)
+ return rewriteValuePPC64_OpLsh64x32(v)
case OpLsh64x64:
- return rewriteValuePPC64_OpLsh64x64(v, config)
+ return rewriteValuePPC64_OpLsh64x64(v)
case OpLsh64x8:
- return rewriteValuePPC64_OpLsh64x8(v, config)
+ return rewriteValuePPC64_OpLsh64x8(v)
case OpLsh8x16:
- return rewriteValuePPC64_OpLsh8x16(v, config)
+ return rewriteValuePPC64_OpLsh8x16(v)
case OpLsh8x32:
- return rewriteValuePPC64_OpLsh8x32(v, config)
+ return rewriteValuePPC64_OpLsh8x32(v)
case OpLsh8x64:
- return rewriteValuePPC64_OpLsh8x64(v, config)
+ return rewriteValuePPC64_OpLsh8x64(v)
case OpLsh8x8:
- return rewriteValuePPC64_OpLsh8x8(v, config)
+ return rewriteValuePPC64_OpLsh8x8(v)
case OpMod16:
- return rewriteValuePPC64_OpMod16(v, config)
+ return rewriteValuePPC64_OpMod16(v)
case OpMod16u:
- return rewriteValuePPC64_OpMod16u(v, config)
+ return rewriteValuePPC64_OpMod16u(v)
case OpMod32:
- return rewriteValuePPC64_OpMod32(v, config)
+ return rewriteValuePPC64_OpMod32(v)
case OpMod32u:
- return rewriteValuePPC64_OpMod32u(v, config)
+ return rewriteValuePPC64_OpMod32u(v)
case OpMod64:
- return rewriteValuePPC64_OpMod64(v, config)
+ return rewriteValuePPC64_OpMod64(v)
case OpMod64u:
- return rewriteValuePPC64_OpMod64u(v, config)
+ return rewriteValuePPC64_OpMod64u(v)
case OpMod8:
- return rewriteValuePPC64_OpMod8(v, config)
+ return rewriteValuePPC64_OpMod8(v)
case OpMod8u:
- return rewriteValuePPC64_OpMod8u(v, config)
+ return rewriteValuePPC64_OpMod8u(v)
case OpMove:
- return rewriteValuePPC64_OpMove(v, config)
+ return rewriteValuePPC64_OpMove(v)
case OpMul16:
- return rewriteValuePPC64_OpMul16(v, config)
+ return rewriteValuePPC64_OpMul16(v)
case OpMul32:
- return rewriteValuePPC64_OpMul32(v, config)
+ return rewriteValuePPC64_OpMul32(v)
case OpMul32F:
- return rewriteValuePPC64_OpMul32F(v, config)
+ return rewriteValuePPC64_OpMul32F(v)
case OpMul64:
- return rewriteValuePPC64_OpMul64(v, config)
+ return rewriteValuePPC64_OpMul64(v)
case OpMul64F:
- return rewriteValuePPC64_OpMul64F(v, config)
+ return rewriteValuePPC64_OpMul64F(v)
case OpMul8:
- return rewriteValuePPC64_OpMul8(v, config)
+ return rewriteValuePPC64_OpMul8(v)
case OpNeg16:
- return rewriteValuePPC64_OpNeg16(v, config)
+ return rewriteValuePPC64_OpNeg16(v)
case OpNeg32:
- return rewriteValuePPC64_OpNeg32(v, config)
+ return rewriteValuePPC64_OpNeg32(v)
case OpNeg32F:
- return rewriteValuePPC64_OpNeg32F(v, config)
+ return rewriteValuePPC64_OpNeg32F(v)
case OpNeg64:
- return rewriteValuePPC64_OpNeg64(v, config)
+ return rewriteValuePPC64_OpNeg64(v)
case OpNeg64F:
- return rewriteValuePPC64_OpNeg64F(v, config)
+ return rewriteValuePPC64_OpNeg64F(v)
case OpNeg8:
- return rewriteValuePPC64_OpNeg8(v, config)
+ return rewriteValuePPC64_OpNeg8(v)
case OpNeq16:
- return rewriteValuePPC64_OpNeq16(v, config)
+ return rewriteValuePPC64_OpNeq16(v)
case OpNeq32:
- return rewriteValuePPC64_OpNeq32(v, config)
+ return rewriteValuePPC64_OpNeq32(v)
case OpNeq32F:
- return rewriteValuePPC64_OpNeq32F(v, config)
+ return rewriteValuePPC64_OpNeq32F(v)
case OpNeq64:
- return rewriteValuePPC64_OpNeq64(v, config)
+ return rewriteValuePPC64_OpNeq64(v)
case OpNeq64F:
- return rewriteValuePPC64_OpNeq64F(v, config)
+ return rewriteValuePPC64_OpNeq64F(v)
case OpNeq8:
- return rewriteValuePPC64_OpNeq8(v, config)
+ return rewriteValuePPC64_OpNeq8(v)
case OpNeqB:
- return rewriteValuePPC64_OpNeqB(v, config)
+ return rewriteValuePPC64_OpNeqB(v)
case OpNeqPtr:
- return rewriteValuePPC64_OpNeqPtr(v, config)
+ return rewriteValuePPC64_OpNeqPtr(v)
case OpNilCheck:
- return rewriteValuePPC64_OpNilCheck(v, config)
+ return rewriteValuePPC64_OpNilCheck(v)
case OpNot:
- return rewriteValuePPC64_OpNot(v, config)
+ return rewriteValuePPC64_OpNot(v)
case OpOffPtr:
- return rewriteValuePPC64_OpOffPtr(v, config)
+ return rewriteValuePPC64_OpOffPtr(v)
case OpOr16:
- return rewriteValuePPC64_OpOr16(v, config)
+ return rewriteValuePPC64_OpOr16(v)
case OpOr32:
- return rewriteValuePPC64_OpOr32(v, config)
+ return rewriteValuePPC64_OpOr32(v)
case OpOr64:
- return rewriteValuePPC64_OpOr64(v, config)
+ return rewriteValuePPC64_OpOr64(v)
case OpOr8:
- return rewriteValuePPC64_OpOr8(v, config)
+ return rewriteValuePPC64_OpOr8(v)
case OpOrB:
- return rewriteValuePPC64_OpOrB(v, config)
+ return rewriteValuePPC64_OpOrB(v)
case OpPPC64ADD:
- return rewriteValuePPC64_OpPPC64ADD(v, config)
+ return rewriteValuePPC64_OpPPC64ADD(v)
case OpPPC64ADDconst:
- return rewriteValuePPC64_OpPPC64ADDconst(v, config)
+ return rewriteValuePPC64_OpPPC64ADDconst(v)
case OpPPC64AND:
- return rewriteValuePPC64_OpPPC64AND(v, config)
+ return rewriteValuePPC64_OpPPC64AND(v)
case OpPPC64ANDconst:
- return rewriteValuePPC64_OpPPC64ANDconst(v, config)
+ return rewriteValuePPC64_OpPPC64ANDconst(v)
case OpPPC64CMP:
- return rewriteValuePPC64_OpPPC64CMP(v, config)
+ return rewriteValuePPC64_OpPPC64CMP(v)
case OpPPC64CMPU:
- return rewriteValuePPC64_OpPPC64CMPU(v, config)
+ return rewriteValuePPC64_OpPPC64CMPU(v)
case OpPPC64CMPUconst:
- return rewriteValuePPC64_OpPPC64CMPUconst(v, config)
+ return rewriteValuePPC64_OpPPC64CMPUconst(v)
case OpPPC64CMPW:
- return rewriteValuePPC64_OpPPC64CMPW(v, config)
+ return rewriteValuePPC64_OpPPC64CMPW(v)
case OpPPC64CMPWU:
- return rewriteValuePPC64_OpPPC64CMPWU(v, config)
+ return rewriteValuePPC64_OpPPC64CMPWU(v)
case OpPPC64CMPWUconst:
- return rewriteValuePPC64_OpPPC64CMPWUconst(v, config)
+ return rewriteValuePPC64_OpPPC64CMPWUconst(v)
case OpPPC64CMPWconst:
- return rewriteValuePPC64_OpPPC64CMPWconst(v, config)
+ return rewriteValuePPC64_OpPPC64CMPWconst(v)
case OpPPC64CMPconst:
- return rewriteValuePPC64_OpPPC64CMPconst(v, config)
+ return rewriteValuePPC64_OpPPC64CMPconst(v)
case OpPPC64Equal:
- return rewriteValuePPC64_OpPPC64Equal(v, config)
+ return rewriteValuePPC64_OpPPC64Equal(v)
case OpPPC64FMOVDload:
- return rewriteValuePPC64_OpPPC64FMOVDload(v, config)
+ return rewriteValuePPC64_OpPPC64FMOVDload(v)
case OpPPC64FMOVDstore:
- return rewriteValuePPC64_OpPPC64FMOVDstore(v, config)
+ return rewriteValuePPC64_OpPPC64FMOVDstore(v)
case OpPPC64FMOVSload:
- return rewriteValuePPC64_OpPPC64FMOVSload(v, config)
+ return rewriteValuePPC64_OpPPC64FMOVSload(v)
case OpPPC64FMOVSstore:
- return rewriteValuePPC64_OpPPC64FMOVSstore(v, config)
+ return rewriteValuePPC64_OpPPC64FMOVSstore(v)
case OpPPC64GreaterEqual:
- return rewriteValuePPC64_OpPPC64GreaterEqual(v, config)
+ return rewriteValuePPC64_OpPPC64GreaterEqual(v)
case OpPPC64GreaterThan:
- return rewriteValuePPC64_OpPPC64GreaterThan(v, config)
+ return rewriteValuePPC64_OpPPC64GreaterThan(v)
case OpPPC64LessEqual:
- return rewriteValuePPC64_OpPPC64LessEqual(v, config)
+ return rewriteValuePPC64_OpPPC64LessEqual(v)
case OpPPC64LessThan:
- return rewriteValuePPC64_OpPPC64LessThan(v, config)
+ return rewriteValuePPC64_OpPPC64LessThan(v)
case OpPPC64MOVBZload:
- return rewriteValuePPC64_OpPPC64MOVBZload(v, config)
+ return rewriteValuePPC64_OpPPC64MOVBZload(v)
case OpPPC64MOVBZreg:
- return rewriteValuePPC64_OpPPC64MOVBZreg(v, config)
+ return rewriteValuePPC64_OpPPC64MOVBZreg(v)
case OpPPC64MOVBreg:
- return rewriteValuePPC64_OpPPC64MOVBreg(v, config)
+ return rewriteValuePPC64_OpPPC64MOVBreg(v)
case OpPPC64MOVBstore:
- return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
+ return rewriteValuePPC64_OpPPC64MOVBstore(v)
case OpPPC64MOVBstorezero:
- return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
+ return rewriteValuePPC64_OpPPC64MOVBstorezero(v)
case OpPPC64MOVDload:
- return rewriteValuePPC64_OpPPC64MOVDload(v, config)
+ return rewriteValuePPC64_OpPPC64MOVDload(v)
case OpPPC64MOVDstore:
- return rewriteValuePPC64_OpPPC64MOVDstore(v, config)
+ return rewriteValuePPC64_OpPPC64MOVDstore(v)
case OpPPC64MOVDstorezero:
- return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
+ return rewriteValuePPC64_OpPPC64MOVDstorezero(v)
case OpPPC64MOVHZload:
- return rewriteValuePPC64_OpPPC64MOVHZload(v, config)
+ return rewriteValuePPC64_OpPPC64MOVHZload(v)
case OpPPC64MOVHZreg:
- return rewriteValuePPC64_OpPPC64MOVHZreg(v, config)
+ return rewriteValuePPC64_OpPPC64MOVHZreg(v)
case OpPPC64MOVHload:
- return rewriteValuePPC64_OpPPC64MOVHload(v, config)
+ return rewriteValuePPC64_OpPPC64MOVHload(v)
case OpPPC64MOVHreg:
- return rewriteValuePPC64_OpPPC64MOVHreg(v, config)
+ return rewriteValuePPC64_OpPPC64MOVHreg(v)
case OpPPC64MOVHstore:
- return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
+ return rewriteValuePPC64_OpPPC64MOVHstore(v)
case OpPPC64MOVHstorezero:
- return rewriteValuePPC64_OpPPC64MOVHstorezero(v, config)
+ return rewriteValuePPC64_OpPPC64MOVHstorezero(v)
case OpPPC64MOVWZload:
- return rewriteValuePPC64_OpPPC64MOVWZload(v, config)
+ return rewriteValuePPC64_OpPPC64MOVWZload(v)
case OpPPC64MOVWZreg:
- return rewriteValuePPC64_OpPPC64MOVWZreg(v, config)
+ return rewriteValuePPC64_OpPPC64MOVWZreg(v)
case OpPPC64MOVWload:
- return rewriteValuePPC64_OpPPC64MOVWload(v, config)
+ return rewriteValuePPC64_OpPPC64MOVWload(v)
case OpPPC64MOVWreg:
- return rewriteValuePPC64_OpPPC64MOVWreg(v, config)
+ return rewriteValuePPC64_OpPPC64MOVWreg(v)
case OpPPC64MOVWstore:
- return rewriteValuePPC64_OpPPC64MOVWstore(v, config)
+ return rewriteValuePPC64_OpPPC64MOVWstore(v)
case OpPPC64MOVWstorezero:
- return rewriteValuePPC64_OpPPC64MOVWstorezero(v, config)
+ return rewriteValuePPC64_OpPPC64MOVWstorezero(v)
case OpPPC64MaskIfNotCarry:
- return rewriteValuePPC64_OpPPC64MaskIfNotCarry(v, config)
+ return rewriteValuePPC64_OpPPC64MaskIfNotCarry(v)
case OpPPC64NotEqual:
- return rewriteValuePPC64_OpPPC64NotEqual(v, config)
+ return rewriteValuePPC64_OpPPC64NotEqual(v)
case OpPPC64OR:
- return rewriteValuePPC64_OpPPC64OR(v, config)
+ return rewriteValuePPC64_OpPPC64OR(v)
case OpPPC64ORN:
- return rewriteValuePPC64_OpPPC64ORN(v, config)
+ return rewriteValuePPC64_OpPPC64ORN(v)
case OpPPC64ORconst:
- return rewriteValuePPC64_OpPPC64ORconst(v, config)
+ return rewriteValuePPC64_OpPPC64ORconst(v)
case OpPPC64SUB:
- return rewriteValuePPC64_OpPPC64SUB(v, config)
+ return rewriteValuePPC64_OpPPC64SUB(v)
case OpPPC64XOR:
- return rewriteValuePPC64_OpPPC64XOR(v, config)
+ return rewriteValuePPC64_OpPPC64XOR(v)
case OpPPC64XORconst:
- return rewriteValuePPC64_OpPPC64XORconst(v, config)
+ return rewriteValuePPC64_OpPPC64XORconst(v)
case OpRound32F:
- return rewriteValuePPC64_OpRound32F(v, config)
+ return rewriteValuePPC64_OpRound32F(v)
case OpRound64F:
- return rewriteValuePPC64_OpRound64F(v, config)
+ return rewriteValuePPC64_OpRound64F(v)
case OpRsh16Ux16:
- return rewriteValuePPC64_OpRsh16Ux16(v, config)
+ return rewriteValuePPC64_OpRsh16Ux16(v)
case OpRsh16Ux32:
- return rewriteValuePPC64_OpRsh16Ux32(v, config)
+ return rewriteValuePPC64_OpRsh16Ux32(v)
case OpRsh16Ux64:
- return rewriteValuePPC64_OpRsh16Ux64(v, config)
+ return rewriteValuePPC64_OpRsh16Ux64(v)
case OpRsh16Ux8:
- return rewriteValuePPC64_OpRsh16Ux8(v, config)
+ return rewriteValuePPC64_OpRsh16Ux8(v)
case OpRsh16x16:
- return rewriteValuePPC64_OpRsh16x16(v, config)
+ return rewriteValuePPC64_OpRsh16x16(v)
case OpRsh16x32:
- return rewriteValuePPC64_OpRsh16x32(v, config)
+ return rewriteValuePPC64_OpRsh16x32(v)
case OpRsh16x64:
- return rewriteValuePPC64_OpRsh16x64(v, config)
+ return rewriteValuePPC64_OpRsh16x64(v)
case OpRsh16x8:
- return rewriteValuePPC64_OpRsh16x8(v, config)
+ return rewriteValuePPC64_OpRsh16x8(v)
case OpRsh32Ux16:
- return rewriteValuePPC64_OpRsh32Ux16(v, config)
+ return rewriteValuePPC64_OpRsh32Ux16(v)
case OpRsh32Ux32:
- return rewriteValuePPC64_OpRsh32Ux32(v, config)
+ return rewriteValuePPC64_OpRsh32Ux32(v)
case OpRsh32Ux64:
- return rewriteValuePPC64_OpRsh32Ux64(v, config)
+ return rewriteValuePPC64_OpRsh32Ux64(v)
case OpRsh32Ux8:
- return rewriteValuePPC64_OpRsh32Ux8(v, config)
+ return rewriteValuePPC64_OpRsh32Ux8(v)
case OpRsh32x16:
- return rewriteValuePPC64_OpRsh32x16(v, config)
+ return rewriteValuePPC64_OpRsh32x16(v)
case OpRsh32x32:
- return rewriteValuePPC64_OpRsh32x32(v, config)
+ return rewriteValuePPC64_OpRsh32x32(v)
case OpRsh32x64:
- return rewriteValuePPC64_OpRsh32x64(v, config)
+ return rewriteValuePPC64_OpRsh32x64(v)
case OpRsh32x8:
- return rewriteValuePPC64_OpRsh32x8(v, config)
+ return rewriteValuePPC64_OpRsh32x8(v)
case OpRsh64Ux16:
- return rewriteValuePPC64_OpRsh64Ux16(v, config)
+ return rewriteValuePPC64_OpRsh64Ux16(v)
case OpRsh64Ux32:
- return rewriteValuePPC64_OpRsh64Ux32(v, config)
+ return rewriteValuePPC64_OpRsh64Ux32(v)
case OpRsh64Ux64:
- return rewriteValuePPC64_OpRsh64Ux64(v, config)
+ return rewriteValuePPC64_OpRsh64Ux64(v)
case OpRsh64Ux8:
- return rewriteValuePPC64_OpRsh64Ux8(v, config)
+ return rewriteValuePPC64_OpRsh64Ux8(v)
case OpRsh64x16:
- return rewriteValuePPC64_OpRsh64x16(v, config)
+ return rewriteValuePPC64_OpRsh64x16(v)
case OpRsh64x32:
- return rewriteValuePPC64_OpRsh64x32(v, config)
+ return rewriteValuePPC64_OpRsh64x32(v)
case OpRsh64x64:
- return rewriteValuePPC64_OpRsh64x64(v, config)
+ return rewriteValuePPC64_OpRsh64x64(v)
case OpRsh64x8:
- return rewriteValuePPC64_OpRsh64x8(v, config)
+ return rewriteValuePPC64_OpRsh64x8(v)
case OpRsh8Ux16:
- return rewriteValuePPC64_OpRsh8Ux16(v, config)
+ return rewriteValuePPC64_OpRsh8Ux16(v)
case OpRsh8Ux32:
- return rewriteValuePPC64_OpRsh8Ux32(v, config)
+ return rewriteValuePPC64_OpRsh8Ux32(v)
case OpRsh8Ux64:
- return rewriteValuePPC64_OpRsh8Ux64(v, config)
+ return rewriteValuePPC64_OpRsh8Ux64(v)
case OpRsh8Ux8:
- return rewriteValuePPC64_OpRsh8Ux8(v, config)
+ return rewriteValuePPC64_OpRsh8Ux8(v)
case OpRsh8x16:
- return rewriteValuePPC64_OpRsh8x16(v, config)
+ return rewriteValuePPC64_OpRsh8x16(v)
case OpRsh8x32:
- return rewriteValuePPC64_OpRsh8x32(v, config)
+ return rewriteValuePPC64_OpRsh8x32(v)
case OpRsh8x64:
- return rewriteValuePPC64_OpRsh8x64(v, config)
+ return rewriteValuePPC64_OpRsh8x64(v)
case OpRsh8x8:
- return rewriteValuePPC64_OpRsh8x8(v, config)
+ return rewriteValuePPC64_OpRsh8x8(v)
case OpSignExt16to32:
- return rewriteValuePPC64_OpSignExt16to32(v, config)
+ return rewriteValuePPC64_OpSignExt16to32(v)
case OpSignExt16to64:
- return rewriteValuePPC64_OpSignExt16to64(v, config)
+ return rewriteValuePPC64_OpSignExt16to64(v)
case OpSignExt32to64:
- return rewriteValuePPC64_OpSignExt32to64(v, config)
+ return rewriteValuePPC64_OpSignExt32to64(v)
case OpSignExt8to16:
- return rewriteValuePPC64_OpSignExt8to16(v, config)
+ return rewriteValuePPC64_OpSignExt8to16(v)
case OpSignExt8to32:
- return rewriteValuePPC64_OpSignExt8to32(v, config)
+ return rewriteValuePPC64_OpSignExt8to32(v)
case OpSignExt8to64:
- return rewriteValuePPC64_OpSignExt8to64(v, config)
+ return rewriteValuePPC64_OpSignExt8to64(v)
case OpSlicemask:
- return rewriteValuePPC64_OpSlicemask(v, config)
+ return rewriteValuePPC64_OpSlicemask(v)
case OpSqrt:
- return rewriteValuePPC64_OpSqrt(v, config)
+ return rewriteValuePPC64_OpSqrt(v)
case OpStaticCall:
- return rewriteValuePPC64_OpStaticCall(v, config)
+ return rewriteValuePPC64_OpStaticCall(v)
case OpStore:
- return rewriteValuePPC64_OpStore(v, config)
+ return rewriteValuePPC64_OpStore(v)
case OpSub16:
- return rewriteValuePPC64_OpSub16(v, config)
+ return rewriteValuePPC64_OpSub16(v)
case OpSub32:
- return rewriteValuePPC64_OpSub32(v, config)
+ return rewriteValuePPC64_OpSub32(v)
case OpSub32F:
- return rewriteValuePPC64_OpSub32F(v, config)
+ return rewriteValuePPC64_OpSub32F(v)
case OpSub64:
- return rewriteValuePPC64_OpSub64(v, config)
+ return rewriteValuePPC64_OpSub64(v)
case OpSub64F:
- return rewriteValuePPC64_OpSub64F(v, config)
+ return rewriteValuePPC64_OpSub64F(v)
case OpSub8:
- return rewriteValuePPC64_OpSub8(v, config)
+ return rewriteValuePPC64_OpSub8(v)
case OpSubPtr:
- return rewriteValuePPC64_OpSubPtr(v, config)
+ return rewriteValuePPC64_OpSubPtr(v)
case OpTrunc16to8:
- return rewriteValuePPC64_OpTrunc16to8(v, config)
+ return rewriteValuePPC64_OpTrunc16to8(v)
case OpTrunc32to16:
- return rewriteValuePPC64_OpTrunc32to16(v, config)
+ return rewriteValuePPC64_OpTrunc32to16(v)
case OpTrunc32to8:
- return rewriteValuePPC64_OpTrunc32to8(v, config)
+ return rewriteValuePPC64_OpTrunc32to8(v)
case OpTrunc64to16:
- return rewriteValuePPC64_OpTrunc64to16(v, config)
+ return rewriteValuePPC64_OpTrunc64to16(v)
case OpTrunc64to32:
- return rewriteValuePPC64_OpTrunc64to32(v, config)
+ return rewriteValuePPC64_OpTrunc64to32(v)
case OpTrunc64to8:
- return rewriteValuePPC64_OpTrunc64to8(v, config)
+ return rewriteValuePPC64_OpTrunc64to8(v)
case OpXor16:
- return rewriteValuePPC64_OpXor16(v, config)
+ return rewriteValuePPC64_OpXor16(v)
case OpXor32:
- return rewriteValuePPC64_OpXor32(v, config)
+ return rewriteValuePPC64_OpXor32(v)
case OpXor64:
- return rewriteValuePPC64_OpXor64(v, config)
+ return rewriteValuePPC64_OpXor64(v)
case OpXor8:
- return rewriteValuePPC64_OpXor8(v, config)
+ return rewriteValuePPC64_OpXor8(v)
case OpZero:
- return rewriteValuePPC64_OpZero(v, config)
+ return rewriteValuePPC64_OpZero(v)
case OpZeroExt16to32:
- return rewriteValuePPC64_OpZeroExt16to32(v, config)
+ return rewriteValuePPC64_OpZeroExt16to32(v)
case OpZeroExt16to64:
- return rewriteValuePPC64_OpZeroExt16to64(v, config)
+ return rewriteValuePPC64_OpZeroExt16to64(v)
case OpZeroExt32to64:
- return rewriteValuePPC64_OpZeroExt32to64(v, config)
+ return rewriteValuePPC64_OpZeroExt32to64(v)
case OpZeroExt8to16:
- return rewriteValuePPC64_OpZeroExt8to16(v, config)
+ return rewriteValuePPC64_OpZeroExt8to16(v)
case OpZeroExt8to32:
- return rewriteValuePPC64_OpZeroExt8to32(v, config)
+ return rewriteValuePPC64_OpZeroExt8to32(v)
case OpZeroExt8to64:
- return rewriteValuePPC64_OpZeroExt8to64(v, config)
+ return rewriteValuePPC64_OpZeroExt8to64(v)
}
return false
}
-func rewriteValuePPC64_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAdd16(v *Value) bool {
// match: (Add16 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValuePPC64_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAdd32(v *Value) bool {
// match: (Add32 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValuePPC64_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAdd32F(v *Value) bool {
// match: (Add32F x y)
// cond:
// result: (FADDS x y)
return true
}
}
-func rewriteValuePPC64_OpAdd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAdd64(v *Value) bool {
// match: (Add64 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValuePPC64_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAdd64F(v *Value) bool {
// match: (Add64F x y)
// cond:
// result: (FADD x y)
return true
}
}
-func rewriteValuePPC64_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAdd8(v *Value) bool {
// match: (Add8 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValuePPC64_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAddPtr(v *Value) bool {
// match: (AddPtr x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValuePPC64_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAddr(v *Value) bool {
// match: (Addr {sym} base)
// cond:
// result: (MOVDaddr {sym} base)
return true
}
}
-func rewriteValuePPC64_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAnd16(v *Value) bool {
// match: (And16 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValuePPC64_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAnd32(v *Value) bool {
// match: (And32 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValuePPC64_OpAnd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAnd64(v *Value) bool {
// match: (And64 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValuePPC64_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAnd8(v *Value) bool {
// match: (And8 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValuePPC64_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAndB(v *Value) bool {
// match: (AndB x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValuePPC64_OpAtomicAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicAdd32(v *Value) bool {
// match: (AtomicAdd32 ptr val mem)
// cond:
// result: (LoweredAtomicAdd32 ptr val mem)
return true
}
}
-func rewriteValuePPC64_OpAtomicAdd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicAdd64(v *Value) bool {
// match: (AtomicAdd64 ptr val mem)
// cond:
// result: (LoweredAtomicAdd64 ptr val mem)
return true
}
}
-func rewriteValuePPC64_OpAtomicAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicAnd8(v *Value) bool {
// match: (AtomicAnd8 ptr val mem)
// cond:
// result: (LoweredAtomicAnd8 ptr val mem)
return true
}
}
-func rewriteValuePPC64_OpAtomicCompareAndSwap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicCompareAndSwap32(v *Value) bool {
// match: (AtomicCompareAndSwap32 ptr old new_ mem)
// cond:
// result: (LoweredAtomicCas32 ptr old new_ mem)
return true
}
}
-func rewriteValuePPC64_OpAtomicCompareAndSwap64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicCompareAndSwap64(v *Value) bool {
// match: (AtomicCompareAndSwap64 ptr old new_ mem)
// cond:
// result: (LoweredAtomicCas64 ptr old new_ mem)
return true
}
}
-func rewriteValuePPC64_OpAtomicExchange32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicExchange32(v *Value) bool {
// match: (AtomicExchange32 ptr val mem)
// cond:
// result: (LoweredAtomicExchange32 ptr val mem)
return true
}
}
-func rewriteValuePPC64_OpAtomicExchange64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicExchange64(v *Value) bool {
// match: (AtomicExchange64 ptr val mem)
// cond:
// result: (LoweredAtomicExchange64 ptr val mem)
return true
}
}
-func rewriteValuePPC64_OpAtomicLoad32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicLoad32(v *Value) bool {
// match: (AtomicLoad32 ptr mem)
// cond:
// result: (LoweredAtomicLoad32 ptr mem)
return true
}
}
-func rewriteValuePPC64_OpAtomicLoad64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicLoad64(v *Value) bool {
// match: (AtomicLoad64 ptr mem)
// cond:
// result: (LoweredAtomicLoad64 ptr mem)
return true
}
}
-func rewriteValuePPC64_OpAtomicLoadPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicLoadPtr(v *Value) bool {
// match: (AtomicLoadPtr ptr mem)
// cond:
// result: (LoweredAtomicLoadPtr ptr mem)
return true
}
}
-func rewriteValuePPC64_OpAtomicOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicOr8(v *Value) bool {
// match: (AtomicOr8 ptr val mem)
// cond:
// result: (LoweredAtomicOr8 ptr val mem)
return true
}
}
-func rewriteValuePPC64_OpAtomicStore32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicStore32(v *Value) bool {
// match: (AtomicStore32 ptr val mem)
// cond:
// result: (LoweredAtomicStore32 ptr val mem)
return true
}
}
-func rewriteValuePPC64_OpAtomicStore64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpAtomicStore64(v *Value) bool {
// match: (AtomicStore64 ptr val mem)
// cond:
// result: (LoweredAtomicStore64 ptr val mem)
return true
}
}
-func rewriteValuePPC64_OpAvg64u(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpAvg64u(v *Value) bool {
b := v.Block
_ = b
// match: (Avg64u <t> x y)
return true
}
}
-func rewriteValuePPC64_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpClosureCall(v *Value) bool {
// match: (ClosureCall [argwid] entry closure mem)
// cond:
// result: (CALLclosure [argwid] entry closure mem)
return true
}
}
-func rewriteValuePPC64_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpCom16(v *Value) bool {
// match: (Com16 x)
// cond:
// result: (NOR x x)
return true
}
}
-func rewriteValuePPC64_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpCom32(v *Value) bool {
// match: (Com32 x)
// cond:
// result: (NOR x x)
return true
}
}
-func rewriteValuePPC64_OpCom64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpCom64(v *Value) bool {
// match: (Com64 x)
// cond:
// result: (NOR x x)
return true
}
}
-func rewriteValuePPC64_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpCom8(v *Value) bool {
// match: (Com8 x)
// cond:
// result: (NOR x x)
return true
}
}
-func rewriteValuePPC64_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpConst16(v *Value) bool {
// match: (Const16 [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValuePPC64_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpConst32(v *Value) bool {
// match: (Const32 [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValuePPC64_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpConst32F(v *Value) bool {
// match: (Const32F [val])
// cond:
// result: (FMOVSconst [val])
return true
}
}
-func rewriteValuePPC64_OpConst64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpConst64(v *Value) bool {
// match: (Const64 [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValuePPC64_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpConst64F(v *Value) bool {
// match: (Const64F [val])
// cond:
// result: (FMOVDconst [val])
return true
}
}
-func rewriteValuePPC64_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpConst8(v *Value) bool {
// match: (Const8 [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValuePPC64_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpConstBool(v *Value) bool {
// match: (ConstBool [b])
// cond:
// result: (MOVDconst [b])
return true
}
}
-func rewriteValuePPC64_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpConstNil(v *Value) bool {
// match: (ConstNil)
// cond:
// result: (MOVDconst [0])
return true
}
}
-func rewriteValuePPC64_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpConvert(v *Value) bool {
// match: (Convert <t> x mem)
// cond:
// result: (MOVDconvert <t> x mem)
return true
}
}
-func rewriteValuePPC64_OpCvt32Fto32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpCvt32Fto32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Cvt32Fto32 x)
// cond:
// result: (Xf2i64 (FCTIWZ x))
for {
x := v.Args[0]
v.reset(OpPPC64Xf2i64)
- v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, config.fe.TypeFloat64())
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, fe.TypeFloat64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpCvt32Fto64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpCvt32Fto64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Cvt32Fto64 x)
// cond:
// result: (Xf2i64 (FCTIDZ x))
for {
x := v.Args[0]
v.reset(OpPPC64Xf2i64)
- v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, config.fe.TypeFloat64())
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, fe.TypeFloat64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpCvt32Fto64F(v *Value) bool {
// match: (Cvt32Fto64F x)
// cond:
// result: x
return true
}
}
-func rewriteValuePPC64_OpCvt32to32F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpCvt32to32F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Cvt32to32F x)
// cond:
// result: (FRSP (FCFID (Xi2f64 (SignExt32to64 x))))
for {
x := v.Args[0]
v.reset(OpPPC64FRSP)
- v0 := b.NewValue0(v.Pos, OpPPC64FCFID, config.fe.TypeFloat64())
- v1 := b.NewValue0(v.Pos, OpPPC64Xi2f64, config.fe.TypeFloat64())
- v2 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64FCFID, fe.TypeFloat64())
+ v1 := b.NewValue0(v.Pos, OpPPC64Xi2f64, fe.TypeFloat64())
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v2.AddArg(x)
v1.AddArg(v2)
v0.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpCvt32to64F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpCvt32to64F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Cvt32to64F x)
// cond:
// result: (FCFID (Xi2f64 (SignExt32to64 x)))
for {
x := v.Args[0]
v.reset(OpPPC64FCFID)
- v0 := b.NewValue0(v.Pos, OpPPC64Xi2f64, config.fe.TypeFloat64())
- v1 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64Xi2f64, fe.TypeFloat64())
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpCvt64Fto32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpCvt64Fto32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Cvt64Fto32 x)
// cond:
// result: (Xf2i64 (FCTIWZ x))
for {
x := v.Args[0]
v.reset(OpPPC64Xf2i64)
- v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, config.fe.TypeFloat64())
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, fe.TypeFloat64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpCvt64Fto32F(v *Value) bool {
// match: (Cvt64Fto32F x)
// cond:
// result: (FRSP x)
return true
}
}
-func rewriteValuePPC64_OpCvt64Fto64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpCvt64Fto64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Cvt64Fto64 x)
// cond:
// result: (Xf2i64 (FCTIDZ x))
for {
x := v.Args[0]
v.reset(OpPPC64Xf2i64)
- v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, config.fe.TypeFloat64())
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, fe.TypeFloat64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpCvt64to32F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpCvt64to32F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Cvt64to32F x)
// cond:
// result: (FRSP (FCFID (Xi2f64 x)))
for {
x := v.Args[0]
v.reset(OpPPC64FRSP)
- v0 := b.NewValue0(v.Pos, OpPPC64FCFID, config.fe.TypeFloat64())
- v1 := b.NewValue0(v.Pos, OpPPC64Xi2f64, config.fe.TypeFloat64())
+ v0 := b.NewValue0(v.Pos, OpPPC64FCFID, fe.TypeFloat64())
+ v1 := b.NewValue0(v.Pos, OpPPC64Xi2f64, fe.TypeFloat64())
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpCvt64to64F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpCvt64to64F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Cvt64to64F x)
// cond:
// result: (FCFID (Xi2f64 x))
for {
x := v.Args[0]
v.reset(OpPPC64FCFID)
- v0 := b.NewValue0(v.Pos, OpPPC64Xi2f64, config.fe.TypeFloat64())
+ v0 := b.NewValue0(v.Pos, OpPPC64Xi2f64, fe.TypeFloat64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpDiv16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpDiv16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16 x y)
// cond:
// result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpDiv16u(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpDiv16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16u x y)
// cond:
// result: (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVWU)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpDiv32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpDiv32(v *Value) bool {
// match: (Div32 x y)
// cond:
// result: (DIVW x y)
return true
}
}
-func rewriteValuePPC64_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpDiv32F(v *Value) bool {
// match: (Div32F x y)
// cond:
// result: (FDIVS x y)
return true
}
}
-func rewriteValuePPC64_OpDiv32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpDiv32u(v *Value) bool {
// match: (Div32u x y)
// cond:
// result: (DIVWU x y)
return true
}
}
-func rewriteValuePPC64_OpDiv64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpDiv64(v *Value) bool {
// match: (Div64 x y)
// cond:
// result: (DIVD x y)
return true
}
}
-func rewriteValuePPC64_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpDiv64F(v *Value) bool {
// match: (Div64F x y)
// cond:
// result: (FDIV x y)
return true
}
}
-func rewriteValuePPC64_OpDiv64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpDiv64u(v *Value) bool {
// match: (Div64u x y)
// cond:
// result: (DIVDU x y)
return true
}
}
-func rewriteValuePPC64_OpDiv8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpDiv8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8 x y)
// cond:
// result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpDiv8u(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpDiv8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8u x y)
// cond:
// result: (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64DIVWU)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpEq16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpEq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq16 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
}
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
y := v.Args[1]
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpEq32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpEq32(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32 x y)
return true
}
}
-func rewriteValuePPC64_OpEq32F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpEq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32F x y)
return true
}
}
-func rewriteValuePPC64_OpEq64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpEq64(v *Value) bool {
b := v.Block
_ = b
// match: (Eq64 x y)
return true
}
}
-func rewriteValuePPC64_OpEq64F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpEq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Eq64F x y)
return true
}
}
-func rewriteValuePPC64_OpEq8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpEq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq8 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
}
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
y := v.Args[1]
v.reset(OpPPC64Equal)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpEqB(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpEqB(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (EqB x y)
// cond:
// result: (ANDconst [1] (EQV x y))
y := v.Args[1]
v.reset(OpPPC64ANDconst)
v.AuxInt = 1
- v0 := b.NewValue0(v.Pos, OpPPC64EQV, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64EQV, fe.TypeInt64())
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpEqPtr(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpEqPtr(v *Value) bool {
b := v.Block
_ = b
// match: (EqPtr x y)
return true
}
}
-func rewriteValuePPC64_OpGeq16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq16 x y)
// cond:
// result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpGeq16U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGeq16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq16U x y)
// cond:
// result: (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpGeq32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32 x y)
return true
}
}
-func rewriteValuePPC64_OpGeq32F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32F x y)
return true
}
}
-func rewriteValuePPC64_OpGeq32U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGeq32U(v *Value) bool {
b := v.Block
_ = b
// match: (Geq32U x y)
return true
}
}
-func rewriteValuePPC64_OpGeq64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGeq64(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64 x y)
return true
}
}
-func rewriteValuePPC64_OpGeq64F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64F x y)
return true
}
}
-func rewriteValuePPC64_OpGeq64U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGeq64U(v *Value) bool {
b := v.Block
_ = b
// match: (Geq64U x y)
return true
}
}
-func rewriteValuePPC64_OpGeq8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq8 x y)
// cond:
// result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpGeq8U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGeq8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq8U x y)
// cond:
// result: (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpPPC64GreaterEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpGetClosurePtr(v *Value) bool {
// match: (GetClosurePtr)
// cond:
// result: (LoweredGetClosurePtr)
return true
}
}
-func rewriteValuePPC64_OpGreater16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGreater16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater16 x y)
// cond:
// result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpGreater16U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGreater16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater16U x y)
// cond:
// result: (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpGreater32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGreater32(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32 x y)
return true
}
}
-func rewriteValuePPC64_OpGreater32F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGreater32F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32F x y)
return true
}
}
-func rewriteValuePPC64_OpGreater32U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGreater32U(v *Value) bool {
b := v.Block
_ = b
// match: (Greater32U x y)
return true
}
}
-func rewriteValuePPC64_OpGreater64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGreater64(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64 x y)
return true
}
}
-func rewriteValuePPC64_OpGreater64F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGreater64F(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64F x y)
return true
}
}
-func rewriteValuePPC64_OpGreater64U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGreater64U(v *Value) bool {
b := v.Block
_ = b
// match: (Greater64U x y)
return true
}
}
-func rewriteValuePPC64_OpGreater8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGreater8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater8 x y)
// cond:
// result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpGreater8U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpGreater8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater8U x y)
// cond:
// result: (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpPPC64GreaterThan)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpHmul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpHmul32(v *Value) bool {
// match: (Hmul32 x y)
// cond:
// result: (MULHW x y)
return true
}
}
-func rewriteValuePPC64_OpHmul32u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpHmul32u(v *Value) bool {
// match: (Hmul32u x y)
// cond:
// result: (MULHWU x y)
return true
}
}
-func rewriteValuePPC64_OpHmul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpHmul64(v *Value) bool {
// match: (Hmul64 x y)
// cond:
// result: (MULHD x y)
return true
}
}
-func rewriteValuePPC64_OpHmul64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpHmul64u(v *Value) bool {
// match: (Hmul64u x y)
// cond:
// result: (MULHDU x y)
return true
}
}
-func rewriteValuePPC64_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpInterCall(v *Value) bool {
// match: (InterCall [argwid] entry mem)
// cond:
// result: (CALLinter [argwid] entry mem)
return true
}
}
-func rewriteValuePPC64_OpIsInBounds(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpIsInBounds(v *Value) bool {
b := v.Block
_ = b
// match: (IsInBounds idx len)
return true
}
}
-func rewriteValuePPC64_OpIsNonNil(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpIsNonNil(v *Value) bool {
b := v.Block
_ = b
// match: (IsNonNil ptr)
return true
}
}
-func rewriteValuePPC64_OpIsSliceInBounds(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpIsSliceInBounds(v *Value) bool {
b := v.Block
_ = b
// match: (IsSliceInBounds idx len)
return true
}
}
-func rewriteValuePPC64_OpLeq16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq16 x y)
// cond:
// result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpLeq16U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLeq16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq16U x y)
// cond:
// result: (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpLeq32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32 x y)
return true
}
}
-func rewriteValuePPC64_OpLeq32F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32F x y)
return true
}
}
-func rewriteValuePPC64_OpLeq32U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLeq32U(v *Value) bool {
b := v.Block
_ = b
// match: (Leq32U x y)
return true
}
}
-func rewriteValuePPC64_OpLeq64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLeq64(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64 x y)
return true
}
}
-func rewriteValuePPC64_OpLeq64F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64F x y)
return true
}
}
-func rewriteValuePPC64_OpLeq64U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLeq64U(v *Value) bool {
b := v.Block
_ = b
// match: (Leq64U x y)
return true
}
}
-func rewriteValuePPC64_OpLeq8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq8 x y)
// cond:
// result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpLeq8U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLeq8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq8U x y)
// cond:
// result: (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpPPC64LessEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpLess16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLess16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less16 x y)
// cond:
// result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
y := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpLess16U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLess16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less16U x y)
// cond:
// result: (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
y := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpLess32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLess32(v *Value) bool {
b := v.Block
_ = b
// match: (Less32 x y)
return true
}
}
-func rewriteValuePPC64_OpLess32F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLess32F(v *Value) bool {
b := v.Block
_ = b
// match: (Less32F x y)
return true
}
}
-func rewriteValuePPC64_OpLess32U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLess32U(v *Value) bool {
b := v.Block
_ = b
// match: (Less32U x y)
return true
}
}
-func rewriteValuePPC64_OpLess64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLess64(v *Value) bool {
b := v.Block
_ = b
// match: (Less64 x y)
return true
}
}
-func rewriteValuePPC64_OpLess64F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLess64F(v *Value) bool {
b := v.Block
_ = b
// match: (Less64F x y)
return true
}
}
-func rewriteValuePPC64_OpLess64U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLess64U(v *Value) bool {
b := v.Block
_ = b
// match: (Less64U x y)
return true
}
}
-func rewriteValuePPC64_OpLess8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLess8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less8 x y)
// cond:
// result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
y := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpLess8U(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLess8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less8U x y)
// cond:
// result: (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
y := v.Args[1]
v.reset(OpPPC64LessThan)
v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpLoad(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLoad(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Load <t> ptr mem)
// cond: (is64BitInt(t) || isPtr(t))
// result: (MOVDload ptr mem)
break
}
v.reset(OpPPC64MOVBreg)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, fe.TypeUInt8())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
}
return false
}
-func rewriteValuePPC64_OpLsh16x16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh16x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x16 x y)
// cond:
- // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+ // result: (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -16
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh16x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x32 x (Const64 [c]))
// cond: uint32(c) < 16
// result: (SLWconst x [c])
}
// match: (Lsh16x32 x y)
// cond:
- // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+ // result: (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -16
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpLsh16x64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh16x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x64 x (Const64 [c]))
// cond: uint64(c) < 16
// result: (SLWconst x [c])
}
// match: (Lsh16x64 x y)
// cond:
- // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+ // result: (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -16
v2.AddArg(y)
return true
}
}
-func rewriteValuePPC64_OpLsh16x8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh16x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x8 x y)
// cond:
- // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+ // result: (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -16
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpLsh32x16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh32x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x16 x y)
// cond:
- // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+ // result: (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpLsh32x32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh32x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x32 x (Const64 [c]))
// cond: uint32(c) < 32
// result: (SLWconst x [c])
}
// match: (Lsh32x32 x y)
// cond:
- // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+ // result: (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpLsh32x64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh32x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x64 x (Const64 [c]))
// cond: uint64(c) < 32
// result: (SLWconst x [c])
}
// match: (Lsh32x64 x y)
// cond:
- // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+ // result: (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v2.AddArg(y)
return true
}
}
-func rewriteValuePPC64_OpLsh32x8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh32x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x8 x y)
// cond:
- // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+ // result: (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpLsh64x16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh64x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x16 x y)
// cond:
- // result: (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+ // result: (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpLsh64x32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh64x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x32 x (Const64 [c]))
// cond: uint32(c) < 64
// result: (SLDconst x [c])
}
// match: (Lsh64x32 x y)
// cond:
- // result: (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+ // result: (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpLsh64x64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh64x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x64 x (Const64 [c]))
// cond: uint64(c) < 64
// result: (SLDconst x [c])
}
// match: (Lsh64x64 x y)
// cond:
- // result: (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+ // result: (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v2.AddArg(y)
return true
}
}
-func rewriteValuePPC64_OpLsh64x8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh64x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x8 x y)
// cond:
- // result: (SLD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+ // result: (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh8x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x16 x y)
// cond:
- // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+ // result: (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -8
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpLsh8x32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh8x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x32 x (Const64 [c]))
// cond: uint32(c) < 8
// result: (SLWconst x [c])
}
// match: (Lsh8x32 x y)
// cond:
- // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+ // result: (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -8
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpLsh8x64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh8x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x64 x (Const64 [c]))
// cond: uint64(c) < 8
// result: (SLWconst x [c])
}
// match: (Lsh8x64 x y)
// cond:
- // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+ // result: (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -8
v2.AddArg(y)
return true
}
}
-func rewriteValuePPC64_OpLsh8x8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpLsh8x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x8 x y)
// cond:
- // result: (SLW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+ // result: (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SLW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -8
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpMod16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpMod16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16 x y)
// cond:
// result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpMod16u(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpMod16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16u x y)
// cond:
// result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32u)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpMod32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpMod32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod32 x y)
// cond:
// result: (SUB x (MULLW y (DIVW x y)))
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64MULLW, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLW, fe.TypeInt32())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64DIVW, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVW, fe.TypeInt32())
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpMod32u(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpMod32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod32u x y)
// cond:
// result: (SUB x (MULLW y (DIVWU x y)))
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64MULLW, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLW, fe.TypeInt32())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64DIVWU, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVWU, fe.TypeInt32())
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpMod64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpMod64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod64 x y)
// cond:
// result: (SUB x (MULLD y (DIVD x y)))
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64MULLD, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLD, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64DIVD, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVD, fe.TypeInt64())
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpMod64u(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpMod64u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod64u x y)
// cond:
// result: (SUB x (MULLD y (DIVDU x y)))
y := v.Args[1]
v.reset(OpPPC64SUB)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64MULLD, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLD, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64DIVDU, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVDU, fe.TypeInt64())
v1.AddArg(x)
v1.AddArg(y)
v0.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpMod8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpMod8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8 x y)
// cond:
// result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpMod8u(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpMod8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8u x y)
// cond:
// result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpMod32u)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpMove(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Move [0] _ _ mem)
// cond:
// result: mem
mem := v.Args[2]
v.reset(OpPPC64MOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, fe.TypeUInt8())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
}
v.reset(OpPPC64MOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, fe.TypeUInt16())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpPPC64MOVBstore)
v.AuxInt = 1
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, fe.TypeUInt8())
v0.AuxInt = 1
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVBstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVBZload, fe.TypeUInt8())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
}
v.reset(OpPPC64MOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVWload, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWload, fe.TypeInt32())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpPPC64MOVHstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, fe.TypeUInt16())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, fe.TypeUInt16())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpPPC64MOVBstore)
v.AuxInt = 3
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, fe.TypeUInt8())
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpPPC64MOVBstore, TypeMem)
v1.AuxInt = 2
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVBZload, fe.TypeUInt8())
v2.AuxInt = 2
v2.AddArg(src)
v2.AddArg(mem)
v3 := b.NewValue0(v.Pos, OpPPC64MOVBstore, TypeMem)
v3.AuxInt = 1
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVBZload, fe.TypeUInt8())
v4.AuxInt = 1
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpPPC64MOVBstore, TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v6 := b.NewValue0(v.Pos, OpPPC64MOVBZload, fe.TypeUInt8())
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
}
v.reset(OpPPC64MOVDstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, fe.TypeInt64())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpPPC64MOVWstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, fe.TypeUInt32())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpPPC64MOVHstore)
v.AuxInt = 6
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, fe.TypeUInt16())
v0.AuxInt = 6
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, fe.TypeUInt16())
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v3 := b.NewValue0(v.Pos, OpPPC64MOVHstore, TypeMem)
v3.AuxInt = 2
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVHZload, fe.TypeUInt16())
v4.AuxInt = 2
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v5 := b.NewValue0(v.Pos, OpPPC64MOVHstore, TypeMem)
v5.AddArg(dst)
- v6 := b.NewValue0(v.Pos, OpPPC64MOVHZload, config.fe.TypeUInt16())
+ v6 := b.NewValue0(v.Pos, OpPPC64MOVHZload, fe.TypeUInt16())
v6.AddArg(src)
v6.AddArg(mem)
v5.AddArg(v6)
v.reset(OpPPC64MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, fe.TypeUInt8())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpPPC64MOVBstore, TypeMem)
v1.AuxInt = 1
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVBZload, fe.TypeUInt8())
v2.AuxInt = 1
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpPPC64MOVBstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpPPC64MOVBZload, config.fe.TypeUInt8())
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVBZload, fe.TypeUInt8())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
}
return false
}
-func rewriteValuePPC64_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpMul16(v *Value) bool {
// match: (Mul16 x y)
// cond:
// result: (MULLW x y)
return true
}
}
-func rewriteValuePPC64_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpMul32(v *Value) bool {
// match: (Mul32 x y)
// cond:
// result: (MULLW x y)
return true
}
}
-func rewriteValuePPC64_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpMul32F(v *Value) bool {
// match: (Mul32F x y)
// cond:
// result: (FMULS x y)
return true
}
}
-func rewriteValuePPC64_OpMul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpMul64(v *Value) bool {
// match: (Mul64 x y)
// cond:
// result: (MULLD x y)
return true
}
}
-func rewriteValuePPC64_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpMul64F(v *Value) bool {
// match: (Mul64F x y)
// cond:
// result: (FMUL x y)
return true
}
}
-func rewriteValuePPC64_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpMul8(v *Value) bool {
// match: (Mul8 x y)
// cond:
// result: (MULLW x y)
return true
}
}
-func rewriteValuePPC64_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpNeg16(v *Value) bool {
// match: (Neg16 x)
// cond:
// result: (NEG x)
return true
}
}
-func rewriteValuePPC64_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpNeg32(v *Value) bool {
// match: (Neg32 x)
// cond:
// result: (NEG x)
return true
}
}
-func rewriteValuePPC64_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpNeg32F(v *Value) bool {
// match: (Neg32F x)
// cond:
// result: (FNEG x)
return true
}
}
-func rewriteValuePPC64_OpNeg64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpNeg64(v *Value) bool {
// match: (Neg64 x)
// cond:
// result: (NEG x)
return true
}
}
-func rewriteValuePPC64_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpNeg64F(v *Value) bool {
// match: (Neg64F x)
// cond:
// result: (FNEG x)
return true
}
}
-func rewriteValuePPC64_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpNeg8(v *Value) bool {
// match: (Neg8 x)
// cond:
// result: (NEG x)
return true
}
}
-func rewriteValuePPC64_OpNeq16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpNeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq16 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
}
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpNeq32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpNeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32 x y)
return true
}
}
-func rewriteValuePPC64_OpNeq32F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpNeq32F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32F x y)
return true
}
}
-func rewriteValuePPC64_OpNeq64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpNeq64(v *Value) bool {
b := v.Block
_ = b
// match: (Neq64 x y)
return true
}
}
-func rewriteValuePPC64_OpNeq64F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpNeq64F(v *Value) bool {
b := v.Block
_ = b
// match: (Neq64F x y)
return true
}
}
-func rewriteValuePPC64_OpNeq8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpNeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq8 x y)
// cond: isSigned(x.Type) && isSigned(y.Type)
// result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
}
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
y := v.Args[1]
v.reset(OpPPC64NotEqual)
v0 := b.NewValue0(v.Pos, OpPPC64CMPW, TypeFlags)
- v1 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValuePPC64_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpNeqB(v *Value) bool {
// match: (NeqB x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValuePPC64_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpNeqPtr(v *Value) bool {
b := v.Block
_ = b
// match: (NeqPtr x y)
return true
}
}
-func rewriteValuePPC64_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpNilCheck(v *Value) bool {
// match: (NilCheck ptr mem)
// cond:
// result: (LoweredNilCheck ptr mem)
return true
}
}
-func rewriteValuePPC64_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpNot(v *Value) bool {
// match: (Not x)
// cond:
// result: (XORconst [1] x)
return true
}
}
-func rewriteValuePPC64_OpOffPtr(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpOffPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (OffPtr [off] ptr)
// cond:
- // result: (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr)
+ // result: (ADD (MOVDconst <fe.TypeInt64()> [off]) ptr)
for {
off := v.AuxInt
ptr := v.Args[0]
v.reset(OpPPC64ADD)
- v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, config.Frontend().TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, fe.TypeInt64())
v0.AuxInt = off
v.AddArg(v0)
v.AddArg(ptr)
return true
}
}
-func rewriteValuePPC64_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpOr16(v *Value) bool {
// match: (Or16 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValuePPC64_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpOr32(v *Value) bool {
// match: (Or32 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValuePPC64_OpOr64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpOr64(v *Value) bool {
// match: (Or64 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValuePPC64_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpOr8(v *Value) bool {
// match: (Or8 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValuePPC64_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpOrB(v *Value) bool {
// match: (OrB x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValuePPC64_OpPPC64ADD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
// match: (ADD (MOVDconst [c]) x)
// cond: is32Bit(c)
// result: (ADDconst [c] x)
}
return false
}
-func rewriteValuePPC64_OpPPC64ADDconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool {
// match: (ADDconst [c] (ADDconst [d] x))
// cond: is32Bit(c+d)
// result: (ADDconst [c+d] x)
}
return false
}
-func rewriteValuePPC64_OpPPC64AND(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
// match: (AND x (NOR y y))
// cond:
// result: (ANDN x y)
}
return false
}
-func rewriteValuePPC64_OpPPC64ANDconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool {
// match: (ANDconst [c] (ANDconst [d] x))
// cond:
// result: (ANDconst [c&d] x)
}
return false
}
-func rewriteValuePPC64_OpPPC64CMP(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64CMP(v *Value) bool {
b := v.Block
_ = b
// match: (CMP x (MOVDconst [c]))
}
return false
}
-func rewriteValuePPC64_OpPPC64CMPU(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64CMPU(v *Value) bool {
b := v.Block
_ = b
// match: (CMPU x (MOVDconst [c]))
}
return false
}
-func rewriteValuePPC64_OpPPC64CMPUconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64CMPUconst(v *Value) bool {
// match: (CMPUconst (MOVDconst [x]) [y])
// cond: int64(x)==int64(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValuePPC64_OpPPC64CMPW(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool {
b := v.Block
_ = b
// match: (CMPW x (MOVWreg y))
}
return false
}
-func rewriteValuePPC64_OpPPC64CMPWU(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool {
b := v.Block
_ = b
// match: (CMPWU x (MOVWZreg y))
}
return false
}
-func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value) bool {
// match: (CMPWUconst (MOVDconst [x]) [y])
// cond: int32(x)==int32(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValuePPC64_OpPPC64CMPWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64CMPWconst(v *Value) bool {
// match: (CMPWconst (MOVDconst [x]) [y])
// cond: int32(x)==int32(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValuePPC64_OpPPC64CMPconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64CMPconst(v *Value) bool {
// match: (CMPconst (MOVDconst [x]) [y])
// cond: int64(x)==int64(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValuePPC64_OpPPC64Equal(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64Equal(v *Value) bool {
// match: (Equal (FlagEQ))
// cond:
// result: (MOVDconst [1])
}
return false
}
-func rewriteValuePPC64_OpPPC64FMOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool {
// match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool {
// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is16Bit(off1+off2)
// result: (FMOVDstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64FMOVSload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool {
// match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool {
// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is16Bit(off1+off2)
// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64GreaterEqual(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64GreaterEqual(v *Value) bool {
// match: (GreaterEqual (FlagEQ))
// cond:
// result: (MOVDconst [1])
}
return false
}
-func rewriteValuePPC64_OpPPC64GreaterThan(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64GreaterThan(v *Value) bool {
// match: (GreaterThan (FlagEQ))
// cond:
// result: (MOVDconst [0])
}
return false
}
-func rewriteValuePPC64_OpPPC64LessEqual(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64LessEqual(v *Value) bool {
// match: (LessEqual (FlagEQ))
// cond:
// result: (MOVDconst [1])
}
return false
}
-func rewriteValuePPC64_OpPPC64LessThan(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64LessThan(v *Value) bool {
// match: (LessThan (FlagEQ))
// cond:
// result: (MOVDconst [0])
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVBZload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool {
// match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool {
// match: (MOVBZreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFF
// result: y
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVBreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool {
// match: (MOVBreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0x7F
// result: y
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool {
// match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(off1+off2)
// result: (MOVBstore [off1+off2] {sym} x val mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool {
// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVBstorezero [off1+off2] {sym} x mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool {
// match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool {
// match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(off1+off2)
// result: (MOVDstore [off1+off2] {sym} x val mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool {
// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVDstorezero [off1+off2] {sym} x mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVHZload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool {
// match: (MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool {
// match: (MOVHZreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFFFF
// result: y
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVHload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool {
// match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVHreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool {
// match: (MOVHreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0x7FFF
// result: y
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool {
// match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(off1+off2)
// result: (MOVHstore [off1+off2] {sym} x val mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool {
// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVHstorezero [off1+off2] {sym} x mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVWZload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool {
// match: (MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool {
// match: (MOVWZreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFFFFFFFF
// result: y
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVWload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool {
// match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2)
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVWreg(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool {
// match: (MOVWreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFFFF
// result: y
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVWstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool {
// match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(off1+off2)
// result: (MOVWstore [off1+off2] {sym} x val mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool {
// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
// result: (MOVWstorezero [off1+off2] {sym} x mem)
}
return false
}
-func rewriteValuePPC64_OpPPC64MaskIfNotCarry(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64MaskIfNotCarry(v *Value) bool {
// match: (MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _)))
// cond: c < 0 && d > 0 && c + d < 0
// result: (MOVDconst [-1])
}
return false
}
-func rewriteValuePPC64_OpPPC64NotEqual(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64NotEqual(v *Value) bool {
// match: (NotEqual (FlagEQ))
// cond:
// result: (MOVDconst [0])
}
return false
}
-func rewriteValuePPC64_OpPPC64OR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
// match: (OR (MOVDconst [c]) (MOVDconst [d]))
// cond:
// result: (MOVDconst [c|d])
}
return false
}
-func rewriteValuePPC64_OpPPC64ORN(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64ORN(v *Value) bool {
// match: (ORN x (MOVDconst [-1]))
// cond:
// result: x
}
return false
}
-func rewriteValuePPC64_OpPPC64ORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64ORconst(v *Value) bool {
// match: (ORconst [c] (ORconst [d] x))
// cond:
// result: (ORconst [c|d] x)
}
return false
}
-func rewriteValuePPC64_OpPPC64SUB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64SUB(v *Value) bool {
// match: (SUB x (MOVDconst [c]))
// cond: is32Bit(-c)
// result: (ADDconst [-c] x)
}
return false
}
-func rewriteValuePPC64_OpPPC64XOR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
// match: (XOR (MOVDconst [c]) (MOVDconst [d]))
// cond:
// result: (MOVDconst [c^d])
}
return false
}
-func rewriteValuePPC64_OpPPC64XORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpPPC64XORconst(v *Value) bool {
// match: (XORconst [c] (XORconst [d] x))
// cond:
// result: (XORconst [c^d] x)
}
return false
}
-func rewriteValuePPC64_OpRound32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpRound32F(v *Value) bool {
// match: (Round32F x)
// cond:
// result: x
return true
}
}
-func rewriteValuePPC64_OpRound64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpRound64F(v *Value) bool {
// match: (Round64F x)
// cond:
// result: x
return true
}
}
-func rewriteValuePPC64_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux16 x y)
// cond:
- // result: (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+ // result: (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValuePPC64_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux32 x (Const64 [c]))
// cond: uint32(c) < 16
// result: (SRWconst (ZeroExt16to32 x) [c])
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh16Ux32 x y)
// cond:
- // result: (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+ // result: (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValuePPC64_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh16Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux64 x (Const64 [c]))
// cond: uint64(c) < 16
// result: (SRWconst (ZeroExt16to32 x) [c])
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh16Ux64 x y)
// cond:
- // result: (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+ // result: (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
v3.AddArg(y)
return true
}
}
-func rewriteValuePPC64_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux8 x y)
// cond:
- // result: (SRW (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+ // result: (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValuePPC64_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh16x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x16 x y)
// cond:
- // result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+ // result: (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValuePPC64_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh16x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x32 x (Const64 [c]))
// cond: uint32(c) < 16
// result: (SRAWconst (SignExt16to32 x) [c])
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh16x32 x y)
// cond:
- // result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+ // result: (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValuePPC64_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh16x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x64 x (Const64 [c]))
// cond: uint64(c) < 16
// result: (SRAWconst (SignExt16to32 x) [c])
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = 63
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh16x64 x y)
// cond:
- // result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+ // result: (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
v3.AddArg(y)
return true
}
}
-func rewriteValuePPC64_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh16x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x8 x y)
// cond:
- // result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+ // result: (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -16
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValuePPC64_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux16 x y)
// cond:
- // result: (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+ // result: (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux32 x (Const64 [c]))
// cond: uint32(c) < 32
// result: (SRWconst x [c])
}
// match: (Rsh32Ux32 x y)
// cond:
- // result: (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+ // result: (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpRsh32Ux64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux64 x (Const64 [c]))
// cond: uint64(c) < 32
// result: (SRWconst x [c])
}
// match: (Rsh32Ux64 x y)
// cond:
- // result: (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+ // result: (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v2.AddArg(y)
return true
}
}
-func rewriteValuePPC64_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux8 x y)
// cond:
- // result: (SRW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+ // result: (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh32x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x16 x y)
// cond:
- // result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+ // result: (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh32x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x32 x (Const64 [c]))
// cond: uint32(c) < 32
// result: (SRAWconst x [c])
}
// match: (Rsh32x32 x y)
// cond:
- // result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+ // result: (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpRsh32x64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh32x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x64 x (Const64 [c]))
// cond: uint64(c) < 32
// result: (SRAWconst x [c])
}
// match: (Rsh32x64 x y)
// cond:
- // result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+ // result: (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
v2.AddArg(y)
return true
}
}
-func rewriteValuePPC64_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh32x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x8 x y)
// cond:
- // result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+ // result: (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -32
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpRsh64Ux16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux16 x y)
// cond:
- // result: (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+ // result: (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpRsh64Ux32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux32 x (Const64 [c]))
// cond: uint32(c) < 64
// result: (SRDconst x [c])
}
// match: (Rsh64Ux32 x y)
// cond:
- // result: (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+ // result: (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpRsh64Ux64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux64 x (Const64 [c]))
// cond: uint64(c) < 64
// result: (SRDconst x [c])
}
// match: (Rsh64Ux64 x y)
// cond:
- // result: (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+ // result: (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v2.AddArg(y)
return true
}
}
-func rewriteValuePPC64_OpRsh64Ux8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux8 x y)
// cond:
- // result: (SRD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+ // result: (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpRsh64x16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh64x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x16 x y)
// cond:
- // result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+ // result: (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpRsh64x32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh64x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x32 x (Const64 [c]))
// cond: uint32(c) < 64
// result: (SRADconst x [c])
}
// match: (Rsh64x32 x y)
// cond:
- // result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+ // result: (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpRsh64x64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh64x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x64 x (Const64 [c]))
// cond: uint64(c) < 64
// result: (SRADconst x [c])
}
// match: (Rsh64x64 x y)
// cond:
- // result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+ // result: (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
v2.AddArg(y)
return true
}
}
-func rewriteValuePPC64_OpRsh64x8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh64x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x8 x y)
// cond:
- // result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+ // result: (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAD)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v0.AddArg(y)
- v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v2 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v2.AuxInt = -64
- v3 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValuePPC64_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux16 x y)
// cond:
- // result: (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+ // result: (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValuePPC64_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux32 x (Const64 [c]))
// cond: uint32(c) < 8
// result: (SRWconst (ZeroExt8to32 x) [c])
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh8Ux32 x y)
// cond:
- // result: (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+ // result: (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValuePPC64_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux64 x (Const64 [c]))
// cond: uint64(c) < 8
// result: (SRWconst (ZeroExt8to32 x) [c])
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpPPC64SRWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh8Ux64 x y)
// cond:
- // result: (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+ // result: (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
v3.AddArg(y)
return true
}
}
-func rewriteValuePPC64_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux8 x y)
// cond:
- // result: (SRW (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+ // result: (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRW)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValuePPC64_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh8x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x16 x y)
// cond:
- // result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+ // result: (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
- v4 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValuePPC64_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh8x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x32 x (Const64 [c]))
// cond: uint32(c) < 8
// result: (SRAWconst (SignExt8to32 x) [c])
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh8x32 x y)
// cond:
- // result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+ // result: (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValuePPC64_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh8x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x64 x (Const64 [c]))
// cond: uint64(c) < 8
// result: (SRAWconst (SignExt8to32 x) [c])
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = 63
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
v.reset(OpPPC64SRAWconst)
v.AuxInt = c
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh8x64 x y)
// cond:
- // result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+ // result: (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
v3.AddArg(y)
return true
}
}
-func rewriteValuePPC64_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpRsh8x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x8 x y)
// cond:
- // result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+ // result: (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpPPC64SRAW)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpPPC64ORN, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpPPC64ORN, fe.TypeInt64())
v1.AddArg(y)
- v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpPPC64MaskIfNotCarry, fe.TypeInt64())
v3 := b.NewValue0(v.Pos, OpPPC64ADDconstForCarry, TypeFlags)
v3.AuxInt = -8
- v4 := b.NewValue0(v.Pos, OpZeroExt8to64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValuePPC64_OpSignExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSignExt16to32(v *Value) bool {
// match: (SignExt16to32 x)
// cond:
// result: (MOVHreg x)
return true
}
}
-func rewriteValuePPC64_OpSignExt16to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSignExt16to64(v *Value) bool {
// match: (SignExt16to64 x)
// cond:
// result: (MOVHreg x)
return true
}
}
-func rewriteValuePPC64_OpSignExt32to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSignExt32to64(v *Value) bool {
// match: (SignExt32to64 x)
// cond:
// result: (MOVWreg x)
return true
}
}
-func rewriteValuePPC64_OpSignExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSignExt8to16(v *Value) bool {
// match: (SignExt8to16 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValuePPC64_OpSignExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSignExt8to32(v *Value) bool {
// match: (SignExt8to32 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValuePPC64_OpSignExt8to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSignExt8to64(v *Value) bool {
// match: (SignExt8to64 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValuePPC64_OpSlicemask(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpSlicemask(v *Value) bool {
b := v.Block
_ = b
// match: (Slicemask <t> x)
return true
}
}
-func rewriteValuePPC64_OpSqrt(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSqrt(v *Value) bool {
// match: (Sqrt x)
// cond:
// result: (FSQRT x)
return true
}
}
-func rewriteValuePPC64_OpStaticCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpStaticCall(v *Value) bool {
// match: (StaticCall [argwid] {target} mem)
// cond:
// result: (CALLstatic [argwid] {target} mem)
return true
}
}
-func rewriteValuePPC64_OpStore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpStore(v *Value) bool {
// match: (Store {t} ptr val mem)
// cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
}
return false
}
-func rewriteValuePPC64_OpSub16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSub16(v *Value) bool {
// match: (Sub16 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValuePPC64_OpSub32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSub32(v *Value) bool {
// match: (Sub32 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValuePPC64_OpSub32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSub32F(v *Value) bool {
// match: (Sub32F x y)
// cond:
// result: (FSUBS x y)
return true
}
}
-func rewriteValuePPC64_OpSub64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSub64(v *Value) bool {
// match: (Sub64 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValuePPC64_OpSub64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSub64F(v *Value) bool {
// match: (Sub64F x y)
// cond:
// result: (FSUB x y)
return true
}
}
-func rewriteValuePPC64_OpSub8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSub8(v *Value) bool {
// match: (Sub8 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValuePPC64_OpSubPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpSubPtr(v *Value) bool {
// match: (SubPtr x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValuePPC64_OpTrunc16to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpTrunc16to8(v *Value) bool {
// match: (Trunc16to8 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValuePPC64_OpTrunc32to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpTrunc32to16(v *Value) bool {
// match: (Trunc32to16 x)
// cond:
// result: (MOVHreg x)
return true
}
}
-func rewriteValuePPC64_OpTrunc32to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpTrunc32to8(v *Value) bool {
// match: (Trunc32to8 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValuePPC64_OpTrunc64to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpTrunc64to16(v *Value) bool {
// match: (Trunc64to16 x)
// cond:
// result: (MOVHreg x)
return true
}
}
-func rewriteValuePPC64_OpTrunc64to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpTrunc64to32(v *Value) bool {
// match: (Trunc64to32 x)
// cond:
// result: (MOVWreg x)
return true
}
}
-func rewriteValuePPC64_OpTrunc64to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpTrunc64to8(v *Value) bool {
// match: (Trunc64to8 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValuePPC64_OpXor16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpXor16(v *Value) bool {
// match: (Xor16 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValuePPC64_OpXor32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpXor32(v *Value) bool {
// match: (Xor32 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValuePPC64_OpXor64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpXor64(v *Value) bool {
// match: (Xor64 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValuePPC64_OpXor8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpXor8(v *Value) bool {
// match: (Xor8 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValuePPC64_OpZero(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpZero(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (Zero [0] _ mem)
// cond:
// result: mem
}
return false
}
-func rewriteValuePPC64_OpZeroExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpZeroExt16to32(v *Value) bool {
// match: (ZeroExt16to32 x)
// cond:
// result: (MOVHZreg x)
return true
}
}
-func rewriteValuePPC64_OpZeroExt16to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpZeroExt16to64(v *Value) bool {
// match: (ZeroExt16to64 x)
// cond:
// result: (MOVHZreg x)
return true
}
}
-func rewriteValuePPC64_OpZeroExt32to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpZeroExt32to64(v *Value) bool {
// match: (ZeroExt32to64 x)
// cond:
// result: (MOVWZreg x)
return true
}
}
-func rewriteValuePPC64_OpZeroExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpZeroExt8to16(v *Value) bool {
// match: (ZeroExt8to16 x)
// cond:
// result: (MOVBZreg x)
return true
}
}
-func rewriteValuePPC64_OpZeroExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpZeroExt8to32(v *Value) bool {
// match: (ZeroExt8to32 x)
// cond:
// result: (MOVBZreg x)
return true
}
}
-func rewriteValuePPC64_OpZeroExt8to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuePPC64_OpZeroExt8to64(v *Value) bool {
// match: (ZeroExt8to64 x)
// cond:
// result: (MOVBZreg x)
return true
}
}
-func rewriteBlockPPC64(b *Block, config *Config) bool {
+func rewriteBlockPPC64(b *Block) bool {
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
switch b.Kind {
case BlockPPC64EQ:
// match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no)
import "math"
var _ = math.MinInt8 // in case not otherwise used
-func rewriteValueS390X(v *Value, config *Config) bool {
+func rewriteValueS390X(v *Value) bool {
switch v.Op {
case OpAdd16:
- return rewriteValueS390X_OpAdd16(v, config)
+ return rewriteValueS390X_OpAdd16(v)
case OpAdd32:
- return rewriteValueS390X_OpAdd32(v, config)
+ return rewriteValueS390X_OpAdd32(v)
case OpAdd32F:
- return rewriteValueS390X_OpAdd32F(v, config)
+ return rewriteValueS390X_OpAdd32F(v)
case OpAdd64:
- return rewriteValueS390X_OpAdd64(v, config)
+ return rewriteValueS390X_OpAdd64(v)
case OpAdd64F:
- return rewriteValueS390X_OpAdd64F(v, config)
+ return rewriteValueS390X_OpAdd64F(v)
case OpAdd8:
- return rewriteValueS390X_OpAdd8(v, config)
+ return rewriteValueS390X_OpAdd8(v)
case OpAddPtr:
- return rewriteValueS390X_OpAddPtr(v, config)
+ return rewriteValueS390X_OpAddPtr(v)
case OpAddr:
- return rewriteValueS390X_OpAddr(v, config)
+ return rewriteValueS390X_OpAddr(v)
case OpAnd16:
- return rewriteValueS390X_OpAnd16(v, config)
+ return rewriteValueS390X_OpAnd16(v)
case OpAnd32:
- return rewriteValueS390X_OpAnd32(v, config)
+ return rewriteValueS390X_OpAnd32(v)
case OpAnd64:
- return rewriteValueS390X_OpAnd64(v, config)
+ return rewriteValueS390X_OpAnd64(v)
case OpAnd8:
- return rewriteValueS390X_OpAnd8(v, config)
+ return rewriteValueS390X_OpAnd8(v)
case OpAndB:
- return rewriteValueS390X_OpAndB(v, config)
+ return rewriteValueS390X_OpAndB(v)
case OpAtomicAdd32:
- return rewriteValueS390X_OpAtomicAdd32(v, config)
+ return rewriteValueS390X_OpAtomicAdd32(v)
case OpAtomicAdd64:
- return rewriteValueS390X_OpAtomicAdd64(v, config)
+ return rewriteValueS390X_OpAtomicAdd64(v)
case OpAtomicCompareAndSwap32:
- return rewriteValueS390X_OpAtomicCompareAndSwap32(v, config)
+ return rewriteValueS390X_OpAtomicCompareAndSwap32(v)
case OpAtomicCompareAndSwap64:
- return rewriteValueS390X_OpAtomicCompareAndSwap64(v, config)
+ return rewriteValueS390X_OpAtomicCompareAndSwap64(v)
case OpAtomicExchange32:
- return rewriteValueS390X_OpAtomicExchange32(v, config)
+ return rewriteValueS390X_OpAtomicExchange32(v)
case OpAtomicExchange64:
- return rewriteValueS390X_OpAtomicExchange64(v, config)
+ return rewriteValueS390X_OpAtomicExchange64(v)
case OpAtomicLoad32:
- return rewriteValueS390X_OpAtomicLoad32(v, config)
+ return rewriteValueS390X_OpAtomicLoad32(v)
case OpAtomicLoad64:
- return rewriteValueS390X_OpAtomicLoad64(v, config)
+ return rewriteValueS390X_OpAtomicLoad64(v)
case OpAtomicLoadPtr:
- return rewriteValueS390X_OpAtomicLoadPtr(v, config)
+ return rewriteValueS390X_OpAtomicLoadPtr(v)
case OpAtomicStore32:
- return rewriteValueS390X_OpAtomicStore32(v, config)
+ return rewriteValueS390X_OpAtomicStore32(v)
case OpAtomicStore64:
- return rewriteValueS390X_OpAtomicStore64(v, config)
+ return rewriteValueS390X_OpAtomicStore64(v)
case OpAtomicStorePtrNoWB:
- return rewriteValueS390X_OpAtomicStorePtrNoWB(v, config)
+ return rewriteValueS390X_OpAtomicStorePtrNoWB(v)
case OpAvg64u:
- return rewriteValueS390X_OpAvg64u(v, config)
+ return rewriteValueS390X_OpAvg64u(v)
case OpBitLen64:
- return rewriteValueS390X_OpBitLen64(v, config)
+ return rewriteValueS390X_OpBitLen64(v)
case OpBswap32:
- return rewriteValueS390X_OpBswap32(v, config)
+ return rewriteValueS390X_OpBswap32(v)
case OpBswap64:
- return rewriteValueS390X_OpBswap64(v, config)
+ return rewriteValueS390X_OpBswap64(v)
case OpClosureCall:
- return rewriteValueS390X_OpClosureCall(v, config)
+ return rewriteValueS390X_OpClosureCall(v)
case OpCom16:
- return rewriteValueS390X_OpCom16(v, config)
+ return rewriteValueS390X_OpCom16(v)
case OpCom32:
- return rewriteValueS390X_OpCom32(v, config)
+ return rewriteValueS390X_OpCom32(v)
case OpCom64:
- return rewriteValueS390X_OpCom64(v, config)
+ return rewriteValueS390X_OpCom64(v)
case OpCom8:
- return rewriteValueS390X_OpCom8(v, config)
+ return rewriteValueS390X_OpCom8(v)
case OpConst16:
- return rewriteValueS390X_OpConst16(v, config)
+ return rewriteValueS390X_OpConst16(v)
case OpConst32:
- return rewriteValueS390X_OpConst32(v, config)
+ return rewriteValueS390X_OpConst32(v)
case OpConst32F:
- return rewriteValueS390X_OpConst32F(v, config)
+ return rewriteValueS390X_OpConst32F(v)
case OpConst64:
- return rewriteValueS390X_OpConst64(v, config)
+ return rewriteValueS390X_OpConst64(v)
case OpConst64F:
- return rewriteValueS390X_OpConst64F(v, config)
+ return rewriteValueS390X_OpConst64F(v)
case OpConst8:
- return rewriteValueS390X_OpConst8(v, config)
+ return rewriteValueS390X_OpConst8(v)
case OpConstBool:
- return rewriteValueS390X_OpConstBool(v, config)
+ return rewriteValueS390X_OpConstBool(v)
case OpConstNil:
- return rewriteValueS390X_OpConstNil(v, config)
+ return rewriteValueS390X_OpConstNil(v)
case OpConvert:
- return rewriteValueS390X_OpConvert(v, config)
+ return rewriteValueS390X_OpConvert(v)
case OpCtz32:
- return rewriteValueS390X_OpCtz32(v, config)
+ return rewriteValueS390X_OpCtz32(v)
case OpCtz64:
- return rewriteValueS390X_OpCtz64(v, config)
+ return rewriteValueS390X_OpCtz64(v)
case OpCvt32Fto32:
- return rewriteValueS390X_OpCvt32Fto32(v, config)
+ return rewriteValueS390X_OpCvt32Fto32(v)
case OpCvt32Fto64:
- return rewriteValueS390X_OpCvt32Fto64(v, config)
+ return rewriteValueS390X_OpCvt32Fto64(v)
case OpCvt32Fto64F:
- return rewriteValueS390X_OpCvt32Fto64F(v, config)
+ return rewriteValueS390X_OpCvt32Fto64F(v)
case OpCvt32to32F:
- return rewriteValueS390X_OpCvt32to32F(v, config)
+ return rewriteValueS390X_OpCvt32to32F(v)
case OpCvt32to64F:
- return rewriteValueS390X_OpCvt32to64F(v, config)
+ return rewriteValueS390X_OpCvt32to64F(v)
case OpCvt64Fto32:
- return rewriteValueS390X_OpCvt64Fto32(v, config)
+ return rewriteValueS390X_OpCvt64Fto32(v)
case OpCvt64Fto32F:
- return rewriteValueS390X_OpCvt64Fto32F(v, config)
+ return rewriteValueS390X_OpCvt64Fto32F(v)
case OpCvt64Fto64:
- return rewriteValueS390X_OpCvt64Fto64(v, config)
+ return rewriteValueS390X_OpCvt64Fto64(v)
case OpCvt64to32F:
- return rewriteValueS390X_OpCvt64to32F(v, config)
+ return rewriteValueS390X_OpCvt64to32F(v)
case OpCvt64to64F:
- return rewriteValueS390X_OpCvt64to64F(v, config)
+ return rewriteValueS390X_OpCvt64to64F(v)
case OpDiv16:
- return rewriteValueS390X_OpDiv16(v, config)
+ return rewriteValueS390X_OpDiv16(v)
case OpDiv16u:
- return rewriteValueS390X_OpDiv16u(v, config)
+ return rewriteValueS390X_OpDiv16u(v)
case OpDiv32:
- return rewriteValueS390X_OpDiv32(v, config)
+ return rewriteValueS390X_OpDiv32(v)
case OpDiv32F:
- return rewriteValueS390X_OpDiv32F(v, config)
+ return rewriteValueS390X_OpDiv32F(v)
case OpDiv32u:
- return rewriteValueS390X_OpDiv32u(v, config)
+ return rewriteValueS390X_OpDiv32u(v)
case OpDiv64:
- return rewriteValueS390X_OpDiv64(v, config)
+ return rewriteValueS390X_OpDiv64(v)
case OpDiv64F:
- return rewriteValueS390X_OpDiv64F(v, config)
+ return rewriteValueS390X_OpDiv64F(v)
case OpDiv64u:
- return rewriteValueS390X_OpDiv64u(v, config)
+ return rewriteValueS390X_OpDiv64u(v)
case OpDiv8:
- return rewriteValueS390X_OpDiv8(v, config)
+ return rewriteValueS390X_OpDiv8(v)
case OpDiv8u:
- return rewriteValueS390X_OpDiv8u(v, config)
+ return rewriteValueS390X_OpDiv8u(v)
case OpEq16:
- return rewriteValueS390X_OpEq16(v, config)
+ return rewriteValueS390X_OpEq16(v)
case OpEq32:
- return rewriteValueS390X_OpEq32(v, config)
+ return rewriteValueS390X_OpEq32(v)
case OpEq32F:
- return rewriteValueS390X_OpEq32F(v, config)
+ return rewriteValueS390X_OpEq32F(v)
case OpEq64:
- return rewriteValueS390X_OpEq64(v, config)
+ return rewriteValueS390X_OpEq64(v)
case OpEq64F:
- return rewriteValueS390X_OpEq64F(v, config)
+ return rewriteValueS390X_OpEq64F(v)
case OpEq8:
- return rewriteValueS390X_OpEq8(v, config)
+ return rewriteValueS390X_OpEq8(v)
case OpEqB:
- return rewriteValueS390X_OpEqB(v, config)
+ return rewriteValueS390X_OpEqB(v)
case OpEqPtr:
- return rewriteValueS390X_OpEqPtr(v, config)
+ return rewriteValueS390X_OpEqPtr(v)
case OpGeq16:
- return rewriteValueS390X_OpGeq16(v, config)
+ return rewriteValueS390X_OpGeq16(v)
case OpGeq16U:
- return rewriteValueS390X_OpGeq16U(v, config)
+ return rewriteValueS390X_OpGeq16U(v)
case OpGeq32:
- return rewriteValueS390X_OpGeq32(v, config)
+ return rewriteValueS390X_OpGeq32(v)
case OpGeq32F:
- return rewriteValueS390X_OpGeq32F(v, config)
+ return rewriteValueS390X_OpGeq32F(v)
case OpGeq32U:
- return rewriteValueS390X_OpGeq32U(v, config)
+ return rewriteValueS390X_OpGeq32U(v)
case OpGeq64:
- return rewriteValueS390X_OpGeq64(v, config)
+ return rewriteValueS390X_OpGeq64(v)
case OpGeq64F:
- return rewriteValueS390X_OpGeq64F(v, config)
+ return rewriteValueS390X_OpGeq64F(v)
case OpGeq64U:
- return rewriteValueS390X_OpGeq64U(v, config)
+ return rewriteValueS390X_OpGeq64U(v)
case OpGeq8:
- return rewriteValueS390X_OpGeq8(v, config)
+ return rewriteValueS390X_OpGeq8(v)
case OpGeq8U:
- return rewriteValueS390X_OpGeq8U(v, config)
+ return rewriteValueS390X_OpGeq8U(v)
case OpGetClosurePtr:
- return rewriteValueS390X_OpGetClosurePtr(v, config)
+ return rewriteValueS390X_OpGetClosurePtr(v)
case OpGetG:
- return rewriteValueS390X_OpGetG(v, config)
+ return rewriteValueS390X_OpGetG(v)
case OpGreater16:
- return rewriteValueS390X_OpGreater16(v, config)
+ return rewriteValueS390X_OpGreater16(v)
case OpGreater16U:
- return rewriteValueS390X_OpGreater16U(v, config)
+ return rewriteValueS390X_OpGreater16U(v)
case OpGreater32:
- return rewriteValueS390X_OpGreater32(v, config)
+ return rewriteValueS390X_OpGreater32(v)
case OpGreater32F:
- return rewriteValueS390X_OpGreater32F(v, config)
+ return rewriteValueS390X_OpGreater32F(v)
case OpGreater32U:
- return rewriteValueS390X_OpGreater32U(v, config)
+ return rewriteValueS390X_OpGreater32U(v)
case OpGreater64:
- return rewriteValueS390X_OpGreater64(v, config)
+ return rewriteValueS390X_OpGreater64(v)
case OpGreater64F:
- return rewriteValueS390X_OpGreater64F(v, config)
+ return rewriteValueS390X_OpGreater64F(v)
case OpGreater64U:
- return rewriteValueS390X_OpGreater64U(v, config)
+ return rewriteValueS390X_OpGreater64U(v)
case OpGreater8:
- return rewriteValueS390X_OpGreater8(v, config)
+ return rewriteValueS390X_OpGreater8(v)
case OpGreater8U:
- return rewriteValueS390X_OpGreater8U(v, config)
+ return rewriteValueS390X_OpGreater8U(v)
case OpHmul32:
- return rewriteValueS390X_OpHmul32(v, config)
+ return rewriteValueS390X_OpHmul32(v)
case OpHmul32u:
- return rewriteValueS390X_OpHmul32u(v, config)
+ return rewriteValueS390X_OpHmul32u(v)
case OpHmul64:
- return rewriteValueS390X_OpHmul64(v, config)
+ return rewriteValueS390X_OpHmul64(v)
case OpHmul64u:
- return rewriteValueS390X_OpHmul64u(v, config)
+ return rewriteValueS390X_OpHmul64u(v)
case OpITab:
- return rewriteValueS390X_OpITab(v, config)
+ return rewriteValueS390X_OpITab(v)
case OpInterCall:
- return rewriteValueS390X_OpInterCall(v, config)
+ return rewriteValueS390X_OpInterCall(v)
case OpIsInBounds:
- return rewriteValueS390X_OpIsInBounds(v, config)
+ return rewriteValueS390X_OpIsInBounds(v)
case OpIsNonNil:
- return rewriteValueS390X_OpIsNonNil(v, config)
+ return rewriteValueS390X_OpIsNonNil(v)
case OpIsSliceInBounds:
- return rewriteValueS390X_OpIsSliceInBounds(v, config)
+ return rewriteValueS390X_OpIsSliceInBounds(v)
case OpLeq16:
- return rewriteValueS390X_OpLeq16(v, config)
+ return rewriteValueS390X_OpLeq16(v)
case OpLeq16U:
- return rewriteValueS390X_OpLeq16U(v, config)
+ return rewriteValueS390X_OpLeq16U(v)
case OpLeq32:
- return rewriteValueS390X_OpLeq32(v, config)
+ return rewriteValueS390X_OpLeq32(v)
case OpLeq32F:
- return rewriteValueS390X_OpLeq32F(v, config)
+ return rewriteValueS390X_OpLeq32F(v)
case OpLeq32U:
- return rewriteValueS390X_OpLeq32U(v, config)
+ return rewriteValueS390X_OpLeq32U(v)
case OpLeq64:
- return rewriteValueS390X_OpLeq64(v, config)
+ return rewriteValueS390X_OpLeq64(v)
case OpLeq64F:
- return rewriteValueS390X_OpLeq64F(v, config)
+ return rewriteValueS390X_OpLeq64F(v)
case OpLeq64U:
- return rewriteValueS390X_OpLeq64U(v, config)
+ return rewriteValueS390X_OpLeq64U(v)
case OpLeq8:
- return rewriteValueS390X_OpLeq8(v, config)
+ return rewriteValueS390X_OpLeq8(v)
case OpLeq8U:
- return rewriteValueS390X_OpLeq8U(v, config)
+ return rewriteValueS390X_OpLeq8U(v)
case OpLess16:
- return rewriteValueS390X_OpLess16(v, config)
+ return rewriteValueS390X_OpLess16(v)
case OpLess16U:
- return rewriteValueS390X_OpLess16U(v, config)
+ return rewriteValueS390X_OpLess16U(v)
case OpLess32:
- return rewriteValueS390X_OpLess32(v, config)
+ return rewriteValueS390X_OpLess32(v)
case OpLess32F:
- return rewriteValueS390X_OpLess32F(v, config)
+ return rewriteValueS390X_OpLess32F(v)
case OpLess32U:
- return rewriteValueS390X_OpLess32U(v, config)
+ return rewriteValueS390X_OpLess32U(v)
case OpLess64:
- return rewriteValueS390X_OpLess64(v, config)
+ return rewriteValueS390X_OpLess64(v)
case OpLess64F:
- return rewriteValueS390X_OpLess64F(v, config)
+ return rewriteValueS390X_OpLess64F(v)
case OpLess64U:
- return rewriteValueS390X_OpLess64U(v, config)
+ return rewriteValueS390X_OpLess64U(v)
case OpLess8:
- return rewriteValueS390X_OpLess8(v, config)
+ return rewriteValueS390X_OpLess8(v)
case OpLess8U:
- return rewriteValueS390X_OpLess8U(v, config)
+ return rewriteValueS390X_OpLess8U(v)
case OpLoad:
- return rewriteValueS390X_OpLoad(v, config)
+ return rewriteValueS390X_OpLoad(v)
case OpLsh16x16:
- return rewriteValueS390X_OpLsh16x16(v, config)
+ return rewriteValueS390X_OpLsh16x16(v)
case OpLsh16x32:
- return rewriteValueS390X_OpLsh16x32(v, config)
+ return rewriteValueS390X_OpLsh16x32(v)
case OpLsh16x64:
- return rewriteValueS390X_OpLsh16x64(v, config)
+ return rewriteValueS390X_OpLsh16x64(v)
case OpLsh16x8:
- return rewriteValueS390X_OpLsh16x8(v, config)
+ return rewriteValueS390X_OpLsh16x8(v)
case OpLsh32x16:
- return rewriteValueS390X_OpLsh32x16(v, config)
+ return rewriteValueS390X_OpLsh32x16(v)
case OpLsh32x32:
- return rewriteValueS390X_OpLsh32x32(v, config)
+ return rewriteValueS390X_OpLsh32x32(v)
case OpLsh32x64:
- return rewriteValueS390X_OpLsh32x64(v, config)
+ return rewriteValueS390X_OpLsh32x64(v)
case OpLsh32x8:
- return rewriteValueS390X_OpLsh32x8(v, config)
+ return rewriteValueS390X_OpLsh32x8(v)
case OpLsh64x16:
- return rewriteValueS390X_OpLsh64x16(v, config)
+ return rewriteValueS390X_OpLsh64x16(v)
case OpLsh64x32:
- return rewriteValueS390X_OpLsh64x32(v, config)
+ return rewriteValueS390X_OpLsh64x32(v)
case OpLsh64x64:
- return rewriteValueS390X_OpLsh64x64(v, config)
+ return rewriteValueS390X_OpLsh64x64(v)
case OpLsh64x8:
- return rewriteValueS390X_OpLsh64x8(v, config)
+ return rewriteValueS390X_OpLsh64x8(v)
case OpLsh8x16:
- return rewriteValueS390X_OpLsh8x16(v, config)
+ return rewriteValueS390X_OpLsh8x16(v)
case OpLsh8x32:
- return rewriteValueS390X_OpLsh8x32(v, config)
+ return rewriteValueS390X_OpLsh8x32(v)
case OpLsh8x64:
- return rewriteValueS390X_OpLsh8x64(v, config)
+ return rewriteValueS390X_OpLsh8x64(v)
case OpLsh8x8:
- return rewriteValueS390X_OpLsh8x8(v, config)
+ return rewriteValueS390X_OpLsh8x8(v)
case OpMod16:
- return rewriteValueS390X_OpMod16(v, config)
+ return rewriteValueS390X_OpMod16(v)
case OpMod16u:
- return rewriteValueS390X_OpMod16u(v, config)
+ return rewriteValueS390X_OpMod16u(v)
case OpMod32:
- return rewriteValueS390X_OpMod32(v, config)
+ return rewriteValueS390X_OpMod32(v)
case OpMod32u:
- return rewriteValueS390X_OpMod32u(v, config)
+ return rewriteValueS390X_OpMod32u(v)
case OpMod64:
- return rewriteValueS390X_OpMod64(v, config)
+ return rewriteValueS390X_OpMod64(v)
case OpMod64u:
- return rewriteValueS390X_OpMod64u(v, config)
+ return rewriteValueS390X_OpMod64u(v)
case OpMod8:
- return rewriteValueS390X_OpMod8(v, config)
+ return rewriteValueS390X_OpMod8(v)
case OpMod8u:
- return rewriteValueS390X_OpMod8u(v, config)
+ return rewriteValueS390X_OpMod8u(v)
case OpMove:
- return rewriteValueS390X_OpMove(v, config)
+ return rewriteValueS390X_OpMove(v)
case OpMul16:
- return rewriteValueS390X_OpMul16(v, config)
+ return rewriteValueS390X_OpMul16(v)
case OpMul32:
- return rewriteValueS390X_OpMul32(v, config)
+ return rewriteValueS390X_OpMul32(v)
case OpMul32F:
- return rewriteValueS390X_OpMul32F(v, config)
+ return rewriteValueS390X_OpMul32F(v)
case OpMul64:
- return rewriteValueS390X_OpMul64(v, config)
+ return rewriteValueS390X_OpMul64(v)
case OpMul64F:
- return rewriteValueS390X_OpMul64F(v, config)
+ return rewriteValueS390X_OpMul64F(v)
case OpMul8:
- return rewriteValueS390X_OpMul8(v, config)
+ return rewriteValueS390X_OpMul8(v)
case OpNeg16:
- return rewriteValueS390X_OpNeg16(v, config)
+ return rewriteValueS390X_OpNeg16(v)
case OpNeg32:
- return rewriteValueS390X_OpNeg32(v, config)
+ return rewriteValueS390X_OpNeg32(v)
case OpNeg32F:
- return rewriteValueS390X_OpNeg32F(v, config)
+ return rewriteValueS390X_OpNeg32F(v)
case OpNeg64:
- return rewriteValueS390X_OpNeg64(v, config)
+ return rewriteValueS390X_OpNeg64(v)
case OpNeg64F:
- return rewriteValueS390X_OpNeg64F(v, config)
+ return rewriteValueS390X_OpNeg64F(v)
case OpNeg8:
- return rewriteValueS390X_OpNeg8(v, config)
+ return rewriteValueS390X_OpNeg8(v)
case OpNeq16:
- return rewriteValueS390X_OpNeq16(v, config)
+ return rewriteValueS390X_OpNeq16(v)
case OpNeq32:
- return rewriteValueS390X_OpNeq32(v, config)
+ return rewriteValueS390X_OpNeq32(v)
case OpNeq32F:
- return rewriteValueS390X_OpNeq32F(v, config)
+ return rewriteValueS390X_OpNeq32F(v)
case OpNeq64:
- return rewriteValueS390X_OpNeq64(v, config)
+ return rewriteValueS390X_OpNeq64(v)
case OpNeq64F:
- return rewriteValueS390X_OpNeq64F(v, config)
+ return rewriteValueS390X_OpNeq64F(v)
case OpNeq8:
- return rewriteValueS390X_OpNeq8(v, config)
+ return rewriteValueS390X_OpNeq8(v)
case OpNeqB:
- return rewriteValueS390X_OpNeqB(v, config)
+ return rewriteValueS390X_OpNeqB(v)
case OpNeqPtr:
- return rewriteValueS390X_OpNeqPtr(v, config)
+ return rewriteValueS390X_OpNeqPtr(v)
case OpNilCheck:
- return rewriteValueS390X_OpNilCheck(v, config)
+ return rewriteValueS390X_OpNilCheck(v)
case OpNot:
- return rewriteValueS390X_OpNot(v, config)
+ return rewriteValueS390X_OpNot(v)
case OpOffPtr:
- return rewriteValueS390X_OpOffPtr(v, config)
+ return rewriteValueS390X_OpOffPtr(v)
case OpOr16:
- return rewriteValueS390X_OpOr16(v, config)
+ return rewriteValueS390X_OpOr16(v)
case OpOr32:
- return rewriteValueS390X_OpOr32(v, config)
+ return rewriteValueS390X_OpOr32(v)
case OpOr64:
- return rewriteValueS390X_OpOr64(v, config)
+ return rewriteValueS390X_OpOr64(v)
case OpOr8:
- return rewriteValueS390X_OpOr8(v, config)
+ return rewriteValueS390X_OpOr8(v)
case OpOrB:
- return rewriteValueS390X_OpOrB(v, config)
+ return rewriteValueS390X_OpOrB(v)
case OpRound32F:
- return rewriteValueS390X_OpRound32F(v, config)
+ return rewriteValueS390X_OpRound32F(v)
case OpRound64F:
- return rewriteValueS390X_OpRound64F(v, config)
+ return rewriteValueS390X_OpRound64F(v)
case OpRsh16Ux16:
- return rewriteValueS390X_OpRsh16Ux16(v, config)
+ return rewriteValueS390X_OpRsh16Ux16(v)
case OpRsh16Ux32:
- return rewriteValueS390X_OpRsh16Ux32(v, config)
+ return rewriteValueS390X_OpRsh16Ux32(v)
case OpRsh16Ux64:
- return rewriteValueS390X_OpRsh16Ux64(v, config)
+ return rewriteValueS390X_OpRsh16Ux64(v)
case OpRsh16Ux8:
- return rewriteValueS390X_OpRsh16Ux8(v, config)
+ return rewriteValueS390X_OpRsh16Ux8(v)
case OpRsh16x16:
- return rewriteValueS390X_OpRsh16x16(v, config)
+ return rewriteValueS390X_OpRsh16x16(v)
case OpRsh16x32:
- return rewriteValueS390X_OpRsh16x32(v, config)
+ return rewriteValueS390X_OpRsh16x32(v)
case OpRsh16x64:
- return rewriteValueS390X_OpRsh16x64(v, config)
+ return rewriteValueS390X_OpRsh16x64(v)
case OpRsh16x8:
- return rewriteValueS390X_OpRsh16x8(v, config)
+ return rewriteValueS390X_OpRsh16x8(v)
case OpRsh32Ux16:
- return rewriteValueS390X_OpRsh32Ux16(v, config)
+ return rewriteValueS390X_OpRsh32Ux16(v)
case OpRsh32Ux32:
- return rewriteValueS390X_OpRsh32Ux32(v, config)
+ return rewriteValueS390X_OpRsh32Ux32(v)
case OpRsh32Ux64:
- return rewriteValueS390X_OpRsh32Ux64(v, config)
+ return rewriteValueS390X_OpRsh32Ux64(v)
case OpRsh32Ux8:
- return rewriteValueS390X_OpRsh32Ux8(v, config)
+ return rewriteValueS390X_OpRsh32Ux8(v)
case OpRsh32x16:
- return rewriteValueS390X_OpRsh32x16(v, config)
+ return rewriteValueS390X_OpRsh32x16(v)
case OpRsh32x32:
- return rewriteValueS390X_OpRsh32x32(v, config)
+ return rewriteValueS390X_OpRsh32x32(v)
case OpRsh32x64:
- return rewriteValueS390X_OpRsh32x64(v, config)
+ return rewriteValueS390X_OpRsh32x64(v)
case OpRsh32x8:
- return rewriteValueS390X_OpRsh32x8(v, config)
+ return rewriteValueS390X_OpRsh32x8(v)
case OpRsh64Ux16:
- return rewriteValueS390X_OpRsh64Ux16(v, config)
+ return rewriteValueS390X_OpRsh64Ux16(v)
case OpRsh64Ux32:
- return rewriteValueS390X_OpRsh64Ux32(v, config)
+ return rewriteValueS390X_OpRsh64Ux32(v)
case OpRsh64Ux64:
- return rewriteValueS390X_OpRsh64Ux64(v, config)
+ return rewriteValueS390X_OpRsh64Ux64(v)
case OpRsh64Ux8:
- return rewriteValueS390X_OpRsh64Ux8(v, config)
+ return rewriteValueS390X_OpRsh64Ux8(v)
case OpRsh64x16:
- return rewriteValueS390X_OpRsh64x16(v, config)
+ return rewriteValueS390X_OpRsh64x16(v)
case OpRsh64x32:
- return rewriteValueS390X_OpRsh64x32(v, config)
+ return rewriteValueS390X_OpRsh64x32(v)
case OpRsh64x64:
- return rewriteValueS390X_OpRsh64x64(v, config)
+ return rewriteValueS390X_OpRsh64x64(v)
case OpRsh64x8:
- return rewriteValueS390X_OpRsh64x8(v, config)
+ return rewriteValueS390X_OpRsh64x8(v)
case OpRsh8Ux16:
- return rewriteValueS390X_OpRsh8Ux16(v, config)
+ return rewriteValueS390X_OpRsh8Ux16(v)
case OpRsh8Ux32:
- return rewriteValueS390X_OpRsh8Ux32(v, config)
+ return rewriteValueS390X_OpRsh8Ux32(v)
case OpRsh8Ux64:
- return rewriteValueS390X_OpRsh8Ux64(v, config)
+ return rewriteValueS390X_OpRsh8Ux64(v)
case OpRsh8Ux8:
- return rewriteValueS390X_OpRsh8Ux8(v, config)
+ return rewriteValueS390X_OpRsh8Ux8(v)
case OpRsh8x16:
- return rewriteValueS390X_OpRsh8x16(v, config)
+ return rewriteValueS390X_OpRsh8x16(v)
case OpRsh8x32:
- return rewriteValueS390X_OpRsh8x32(v, config)
+ return rewriteValueS390X_OpRsh8x32(v)
case OpRsh8x64:
- return rewriteValueS390X_OpRsh8x64(v, config)
+ return rewriteValueS390X_OpRsh8x64(v)
case OpRsh8x8:
- return rewriteValueS390X_OpRsh8x8(v, config)
+ return rewriteValueS390X_OpRsh8x8(v)
case OpS390XADD:
- return rewriteValueS390X_OpS390XADD(v, config)
+ return rewriteValueS390X_OpS390XADD(v)
case OpS390XADDW:
- return rewriteValueS390X_OpS390XADDW(v, config)
+ return rewriteValueS390X_OpS390XADDW(v)
case OpS390XADDWconst:
- return rewriteValueS390X_OpS390XADDWconst(v, config)
+ return rewriteValueS390X_OpS390XADDWconst(v)
case OpS390XADDconst:
- return rewriteValueS390X_OpS390XADDconst(v, config)
+ return rewriteValueS390X_OpS390XADDconst(v)
case OpS390XAND:
- return rewriteValueS390X_OpS390XAND(v, config)
+ return rewriteValueS390X_OpS390XAND(v)
case OpS390XANDW:
- return rewriteValueS390X_OpS390XANDW(v, config)
+ return rewriteValueS390X_OpS390XANDW(v)
case OpS390XANDWconst:
- return rewriteValueS390X_OpS390XANDWconst(v, config)
+ return rewriteValueS390X_OpS390XANDWconst(v)
case OpS390XANDconst:
- return rewriteValueS390X_OpS390XANDconst(v, config)
+ return rewriteValueS390X_OpS390XANDconst(v)
case OpS390XCMP:
- return rewriteValueS390X_OpS390XCMP(v, config)
+ return rewriteValueS390X_OpS390XCMP(v)
case OpS390XCMPU:
- return rewriteValueS390X_OpS390XCMPU(v, config)
+ return rewriteValueS390X_OpS390XCMPU(v)
case OpS390XCMPUconst:
- return rewriteValueS390X_OpS390XCMPUconst(v, config)
+ return rewriteValueS390X_OpS390XCMPUconst(v)
case OpS390XCMPW:
- return rewriteValueS390X_OpS390XCMPW(v, config)
+ return rewriteValueS390X_OpS390XCMPW(v)
case OpS390XCMPWU:
- return rewriteValueS390X_OpS390XCMPWU(v, config)
+ return rewriteValueS390X_OpS390XCMPWU(v)
case OpS390XCMPWUconst:
- return rewriteValueS390X_OpS390XCMPWUconst(v, config)
+ return rewriteValueS390X_OpS390XCMPWUconst(v)
case OpS390XCMPWconst:
- return rewriteValueS390X_OpS390XCMPWconst(v, config)
+ return rewriteValueS390X_OpS390XCMPWconst(v)
case OpS390XCMPconst:
- return rewriteValueS390X_OpS390XCMPconst(v, config)
+ return rewriteValueS390X_OpS390XCMPconst(v)
case OpS390XFADD:
- return rewriteValueS390X_OpS390XFADD(v, config)
+ return rewriteValueS390X_OpS390XFADD(v)
case OpS390XFADDS:
- return rewriteValueS390X_OpS390XFADDS(v, config)
+ return rewriteValueS390X_OpS390XFADDS(v)
case OpS390XFMOVDload:
- return rewriteValueS390X_OpS390XFMOVDload(v, config)
+ return rewriteValueS390X_OpS390XFMOVDload(v)
case OpS390XFMOVDloadidx:
- return rewriteValueS390X_OpS390XFMOVDloadidx(v, config)
+ return rewriteValueS390X_OpS390XFMOVDloadidx(v)
case OpS390XFMOVDstore:
- return rewriteValueS390X_OpS390XFMOVDstore(v, config)
+ return rewriteValueS390X_OpS390XFMOVDstore(v)
case OpS390XFMOVDstoreidx:
- return rewriteValueS390X_OpS390XFMOVDstoreidx(v, config)
+ return rewriteValueS390X_OpS390XFMOVDstoreidx(v)
case OpS390XFMOVSload:
- return rewriteValueS390X_OpS390XFMOVSload(v, config)
+ return rewriteValueS390X_OpS390XFMOVSload(v)
case OpS390XFMOVSloadidx:
- return rewriteValueS390X_OpS390XFMOVSloadidx(v, config)
+ return rewriteValueS390X_OpS390XFMOVSloadidx(v)
case OpS390XFMOVSstore:
- return rewriteValueS390X_OpS390XFMOVSstore(v, config)
+ return rewriteValueS390X_OpS390XFMOVSstore(v)
case OpS390XFMOVSstoreidx:
- return rewriteValueS390X_OpS390XFMOVSstoreidx(v, config)
+ return rewriteValueS390X_OpS390XFMOVSstoreidx(v)
case OpS390XFSUB:
- return rewriteValueS390X_OpS390XFSUB(v, config)
+ return rewriteValueS390X_OpS390XFSUB(v)
case OpS390XFSUBS:
- return rewriteValueS390X_OpS390XFSUBS(v, config)
+ return rewriteValueS390X_OpS390XFSUBS(v)
case OpS390XLoweredRound32F:
- return rewriteValueS390X_OpS390XLoweredRound32F(v, config)
+ return rewriteValueS390X_OpS390XLoweredRound32F(v)
case OpS390XLoweredRound64F:
- return rewriteValueS390X_OpS390XLoweredRound64F(v, config)
+ return rewriteValueS390X_OpS390XLoweredRound64F(v)
case OpS390XMOVBZload:
- return rewriteValueS390X_OpS390XMOVBZload(v, config)
+ return rewriteValueS390X_OpS390XMOVBZload(v)
case OpS390XMOVBZloadidx:
- return rewriteValueS390X_OpS390XMOVBZloadidx(v, config)
+ return rewriteValueS390X_OpS390XMOVBZloadidx(v)
case OpS390XMOVBZreg:
- return rewriteValueS390X_OpS390XMOVBZreg(v, config)
+ return rewriteValueS390X_OpS390XMOVBZreg(v)
case OpS390XMOVBload:
- return rewriteValueS390X_OpS390XMOVBload(v, config)
+ return rewriteValueS390X_OpS390XMOVBload(v)
case OpS390XMOVBreg:
- return rewriteValueS390X_OpS390XMOVBreg(v, config)
+ return rewriteValueS390X_OpS390XMOVBreg(v)
case OpS390XMOVBstore:
- return rewriteValueS390X_OpS390XMOVBstore(v, config)
+ return rewriteValueS390X_OpS390XMOVBstore(v)
case OpS390XMOVBstoreconst:
- return rewriteValueS390X_OpS390XMOVBstoreconst(v, config)
+ return rewriteValueS390X_OpS390XMOVBstoreconst(v)
case OpS390XMOVBstoreidx:
- return rewriteValueS390X_OpS390XMOVBstoreidx(v, config)
+ return rewriteValueS390X_OpS390XMOVBstoreidx(v)
case OpS390XMOVDEQ:
- return rewriteValueS390X_OpS390XMOVDEQ(v, config)
+ return rewriteValueS390X_OpS390XMOVDEQ(v)
case OpS390XMOVDGE:
- return rewriteValueS390X_OpS390XMOVDGE(v, config)
+ return rewriteValueS390X_OpS390XMOVDGE(v)
case OpS390XMOVDGT:
- return rewriteValueS390X_OpS390XMOVDGT(v, config)
+ return rewriteValueS390X_OpS390XMOVDGT(v)
case OpS390XMOVDLE:
- return rewriteValueS390X_OpS390XMOVDLE(v, config)
+ return rewriteValueS390X_OpS390XMOVDLE(v)
case OpS390XMOVDLT:
- return rewriteValueS390X_OpS390XMOVDLT(v, config)
+ return rewriteValueS390X_OpS390XMOVDLT(v)
case OpS390XMOVDNE:
- return rewriteValueS390X_OpS390XMOVDNE(v, config)
+ return rewriteValueS390X_OpS390XMOVDNE(v)
case OpS390XMOVDaddridx:
- return rewriteValueS390X_OpS390XMOVDaddridx(v, config)
+ return rewriteValueS390X_OpS390XMOVDaddridx(v)
case OpS390XMOVDload:
- return rewriteValueS390X_OpS390XMOVDload(v, config)
+ return rewriteValueS390X_OpS390XMOVDload(v)
case OpS390XMOVDloadidx:
- return rewriteValueS390X_OpS390XMOVDloadidx(v, config)
+ return rewriteValueS390X_OpS390XMOVDloadidx(v)
case OpS390XMOVDnop:
- return rewriteValueS390X_OpS390XMOVDnop(v, config)
+ return rewriteValueS390X_OpS390XMOVDnop(v)
case OpS390XMOVDreg:
- return rewriteValueS390X_OpS390XMOVDreg(v, config)
+ return rewriteValueS390X_OpS390XMOVDreg(v)
case OpS390XMOVDstore:
- return rewriteValueS390X_OpS390XMOVDstore(v, config)
+ return rewriteValueS390X_OpS390XMOVDstore(v)
case OpS390XMOVDstoreconst:
- return rewriteValueS390X_OpS390XMOVDstoreconst(v, config)
+ return rewriteValueS390X_OpS390XMOVDstoreconst(v)
case OpS390XMOVDstoreidx:
- return rewriteValueS390X_OpS390XMOVDstoreidx(v, config)
+ return rewriteValueS390X_OpS390XMOVDstoreidx(v)
case OpS390XMOVHBRstore:
- return rewriteValueS390X_OpS390XMOVHBRstore(v, config)
+ return rewriteValueS390X_OpS390XMOVHBRstore(v)
case OpS390XMOVHBRstoreidx:
- return rewriteValueS390X_OpS390XMOVHBRstoreidx(v, config)
+ return rewriteValueS390X_OpS390XMOVHBRstoreidx(v)
case OpS390XMOVHZload:
- return rewriteValueS390X_OpS390XMOVHZload(v, config)
+ return rewriteValueS390X_OpS390XMOVHZload(v)
case OpS390XMOVHZloadidx:
- return rewriteValueS390X_OpS390XMOVHZloadidx(v, config)
+ return rewriteValueS390X_OpS390XMOVHZloadidx(v)
case OpS390XMOVHZreg:
- return rewriteValueS390X_OpS390XMOVHZreg(v, config)
+ return rewriteValueS390X_OpS390XMOVHZreg(v)
case OpS390XMOVHload:
- return rewriteValueS390X_OpS390XMOVHload(v, config)
+ return rewriteValueS390X_OpS390XMOVHload(v)
case OpS390XMOVHreg:
- return rewriteValueS390X_OpS390XMOVHreg(v, config)
+ return rewriteValueS390X_OpS390XMOVHreg(v)
case OpS390XMOVHstore:
- return rewriteValueS390X_OpS390XMOVHstore(v, config)
+ return rewriteValueS390X_OpS390XMOVHstore(v)
case OpS390XMOVHstoreconst:
- return rewriteValueS390X_OpS390XMOVHstoreconst(v, config)
+ return rewriteValueS390X_OpS390XMOVHstoreconst(v)
case OpS390XMOVHstoreidx:
- return rewriteValueS390X_OpS390XMOVHstoreidx(v, config)
+ return rewriteValueS390X_OpS390XMOVHstoreidx(v)
case OpS390XMOVWBRstore:
- return rewriteValueS390X_OpS390XMOVWBRstore(v, config)
+ return rewriteValueS390X_OpS390XMOVWBRstore(v)
case OpS390XMOVWBRstoreidx:
- return rewriteValueS390X_OpS390XMOVWBRstoreidx(v, config)
+ return rewriteValueS390X_OpS390XMOVWBRstoreidx(v)
case OpS390XMOVWZload:
- return rewriteValueS390X_OpS390XMOVWZload(v, config)
+ return rewriteValueS390X_OpS390XMOVWZload(v)
case OpS390XMOVWZloadidx:
- return rewriteValueS390X_OpS390XMOVWZloadidx(v, config)
+ return rewriteValueS390X_OpS390XMOVWZloadidx(v)
case OpS390XMOVWZreg:
- return rewriteValueS390X_OpS390XMOVWZreg(v, config)
+ return rewriteValueS390X_OpS390XMOVWZreg(v)
case OpS390XMOVWload:
- return rewriteValueS390X_OpS390XMOVWload(v, config)
+ return rewriteValueS390X_OpS390XMOVWload(v)
case OpS390XMOVWreg:
- return rewriteValueS390X_OpS390XMOVWreg(v, config)
+ return rewriteValueS390X_OpS390XMOVWreg(v)
case OpS390XMOVWstore:
- return rewriteValueS390X_OpS390XMOVWstore(v, config)
+ return rewriteValueS390X_OpS390XMOVWstore(v)
case OpS390XMOVWstoreconst:
- return rewriteValueS390X_OpS390XMOVWstoreconst(v, config)
+ return rewriteValueS390X_OpS390XMOVWstoreconst(v)
case OpS390XMOVWstoreidx:
- return rewriteValueS390X_OpS390XMOVWstoreidx(v, config)
+ return rewriteValueS390X_OpS390XMOVWstoreidx(v)
case OpS390XMULLD:
- return rewriteValueS390X_OpS390XMULLD(v, config)
+ return rewriteValueS390X_OpS390XMULLD(v)
case OpS390XMULLDconst:
- return rewriteValueS390X_OpS390XMULLDconst(v, config)
+ return rewriteValueS390X_OpS390XMULLDconst(v)
case OpS390XMULLW:
- return rewriteValueS390X_OpS390XMULLW(v, config)
+ return rewriteValueS390X_OpS390XMULLW(v)
case OpS390XMULLWconst:
- return rewriteValueS390X_OpS390XMULLWconst(v, config)
+ return rewriteValueS390X_OpS390XMULLWconst(v)
case OpS390XNEG:
- return rewriteValueS390X_OpS390XNEG(v, config)
+ return rewriteValueS390X_OpS390XNEG(v)
case OpS390XNEGW:
- return rewriteValueS390X_OpS390XNEGW(v, config)
+ return rewriteValueS390X_OpS390XNEGW(v)
case OpS390XNOT:
- return rewriteValueS390X_OpS390XNOT(v, config)
+ return rewriteValueS390X_OpS390XNOT(v)
case OpS390XNOTW:
- return rewriteValueS390X_OpS390XNOTW(v, config)
+ return rewriteValueS390X_OpS390XNOTW(v)
case OpS390XOR:
- return rewriteValueS390X_OpS390XOR(v, config)
+ return rewriteValueS390X_OpS390XOR(v)
case OpS390XORW:
- return rewriteValueS390X_OpS390XORW(v, config)
+ return rewriteValueS390X_OpS390XORW(v)
case OpS390XORWconst:
- return rewriteValueS390X_OpS390XORWconst(v, config)
+ return rewriteValueS390X_OpS390XORWconst(v)
case OpS390XORconst:
- return rewriteValueS390X_OpS390XORconst(v, config)
+ return rewriteValueS390X_OpS390XORconst(v)
case OpS390XSLD:
- return rewriteValueS390X_OpS390XSLD(v, config)
+ return rewriteValueS390X_OpS390XSLD(v)
case OpS390XSLW:
- return rewriteValueS390X_OpS390XSLW(v, config)
+ return rewriteValueS390X_OpS390XSLW(v)
case OpS390XSRAD:
- return rewriteValueS390X_OpS390XSRAD(v, config)
+ return rewriteValueS390X_OpS390XSRAD(v)
case OpS390XSRADconst:
- return rewriteValueS390X_OpS390XSRADconst(v, config)
+ return rewriteValueS390X_OpS390XSRADconst(v)
case OpS390XSRAW:
- return rewriteValueS390X_OpS390XSRAW(v, config)
+ return rewriteValueS390X_OpS390XSRAW(v)
case OpS390XSRAWconst:
- return rewriteValueS390X_OpS390XSRAWconst(v, config)
+ return rewriteValueS390X_OpS390XSRAWconst(v)
case OpS390XSRD:
- return rewriteValueS390X_OpS390XSRD(v, config)
+ return rewriteValueS390X_OpS390XSRD(v)
case OpS390XSRW:
- return rewriteValueS390X_OpS390XSRW(v, config)
+ return rewriteValueS390X_OpS390XSRW(v)
case OpS390XSTM2:
- return rewriteValueS390X_OpS390XSTM2(v, config)
+ return rewriteValueS390X_OpS390XSTM2(v)
case OpS390XSTMG2:
- return rewriteValueS390X_OpS390XSTMG2(v, config)
+ return rewriteValueS390X_OpS390XSTMG2(v)
case OpS390XSUB:
- return rewriteValueS390X_OpS390XSUB(v, config)
+ return rewriteValueS390X_OpS390XSUB(v)
case OpS390XSUBEWcarrymask:
- return rewriteValueS390X_OpS390XSUBEWcarrymask(v, config)
+ return rewriteValueS390X_OpS390XSUBEWcarrymask(v)
case OpS390XSUBEcarrymask:
- return rewriteValueS390X_OpS390XSUBEcarrymask(v, config)
+ return rewriteValueS390X_OpS390XSUBEcarrymask(v)
case OpS390XSUBW:
- return rewriteValueS390X_OpS390XSUBW(v, config)
+ return rewriteValueS390X_OpS390XSUBW(v)
case OpS390XSUBWconst:
- return rewriteValueS390X_OpS390XSUBWconst(v, config)
+ return rewriteValueS390X_OpS390XSUBWconst(v)
case OpS390XSUBconst:
- return rewriteValueS390X_OpS390XSUBconst(v, config)
+ return rewriteValueS390X_OpS390XSUBconst(v)
case OpS390XXOR:
- return rewriteValueS390X_OpS390XXOR(v, config)
+ return rewriteValueS390X_OpS390XXOR(v)
case OpS390XXORW:
- return rewriteValueS390X_OpS390XXORW(v, config)
+ return rewriteValueS390X_OpS390XXORW(v)
case OpS390XXORWconst:
- return rewriteValueS390X_OpS390XXORWconst(v, config)
+ return rewriteValueS390X_OpS390XXORWconst(v)
case OpS390XXORconst:
- return rewriteValueS390X_OpS390XXORconst(v, config)
+ return rewriteValueS390X_OpS390XXORconst(v)
case OpSelect0:
- return rewriteValueS390X_OpSelect0(v, config)
+ return rewriteValueS390X_OpSelect0(v)
case OpSelect1:
- return rewriteValueS390X_OpSelect1(v, config)
+ return rewriteValueS390X_OpSelect1(v)
case OpSignExt16to32:
- return rewriteValueS390X_OpSignExt16to32(v, config)
+ return rewriteValueS390X_OpSignExt16to32(v)
case OpSignExt16to64:
- return rewriteValueS390X_OpSignExt16to64(v, config)
+ return rewriteValueS390X_OpSignExt16to64(v)
case OpSignExt32to64:
- return rewriteValueS390X_OpSignExt32to64(v, config)
+ return rewriteValueS390X_OpSignExt32to64(v)
case OpSignExt8to16:
- return rewriteValueS390X_OpSignExt8to16(v, config)
+ return rewriteValueS390X_OpSignExt8to16(v)
case OpSignExt8to32:
- return rewriteValueS390X_OpSignExt8to32(v, config)
+ return rewriteValueS390X_OpSignExt8to32(v)
case OpSignExt8to64:
- return rewriteValueS390X_OpSignExt8to64(v, config)
+ return rewriteValueS390X_OpSignExt8to64(v)
case OpSlicemask:
- return rewriteValueS390X_OpSlicemask(v, config)
+ return rewriteValueS390X_OpSlicemask(v)
case OpSqrt:
- return rewriteValueS390X_OpSqrt(v, config)
+ return rewriteValueS390X_OpSqrt(v)
case OpStaticCall:
- return rewriteValueS390X_OpStaticCall(v, config)
+ return rewriteValueS390X_OpStaticCall(v)
case OpStore:
- return rewriteValueS390X_OpStore(v, config)
+ return rewriteValueS390X_OpStore(v)
case OpSub16:
- return rewriteValueS390X_OpSub16(v, config)
+ return rewriteValueS390X_OpSub16(v)
case OpSub32:
- return rewriteValueS390X_OpSub32(v, config)
+ return rewriteValueS390X_OpSub32(v)
case OpSub32F:
- return rewriteValueS390X_OpSub32F(v, config)
+ return rewriteValueS390X_OpSub32F(v)
case OpSub64:
- return rewriteValueS390X_OpSub64(v, config)
+ return rewriteValueS390X_OpSub64(v)
case OpSub64F:
- return rewriteValueS390X_OpSub64F(v, config)
+ return rewriteValueS390X_OpSub64F(v)
case OpSub8:
- return rewriteValueS390X_OpSub8(v, config)
+ return rewriteValueS390X_OpSub8(v)
case OpSubPtr:
- return rewriteValueS390X_OpSubPtr(v, config)
+ return rewriteValueS390X_OpSubPtr(v)
case OpTrunc16to8:
- return rewriteValueS390X_OpTrunc16to8(v, config)
+ return rewriteValueS390X_OpTrunc16to8(v)
case OpTrunc32to16:
- return rewriteValueS390X_OpTrunc32to16(v, config)
+ return rewriteValueS390X_OpTrunc32to16(v)
case OpTrunc32to8:
- return rewriteValueS390X_OpTrunc32to8(v, config)
+ return rewriteValueS390X_OpTrunc32to8(v)
case OpTrunc64to16:
- return rewriteValueS390X_OpTrunc64to16(v, config)
+ return rewriteValueS390X_OpTrunc64to16(v)
case OpTrunc64to32:
- return rewriteValueS390X_OpTrunc64to32(v, config)
+ return rewriteValueS390X_OpTrunc64to32(v)
case OpTrunc64to8:
- return rewriteValueS390X_OpTrunc64to8(v, config)
+ return rewriteValueS390X_OpTrunc64to8(v)
case OpXor16:
- return rewriteValueS390X_OpXor16(v, config)
+ return rewriteValueS390X_OpXor16(v)
case OpXor32:
- return rewriteValueS390X_OpXor32(v, config)
+ return rewriteValueS390X_OpXor32(v)
case OpXor64:
- return rewriteValueS390X_OpXor64(v, config)
+ return rewriteValueS390X_OpXor64(v)
case OpXor8:
- return rewriteValueS390X_OpXor8(v, config)
+ return rewriteValueS390X_OpXor8(v)
case OpZero:
- return rewriteValueS390X_OpZero(v, config)
+ return rewriteValueS390X_OpZero(v)
case OpZeroExt16to32:
- return rewriteValueS390X_OpZeroExt16to32(v, config)
+ return rewriteValueS390X_OpZeroExt16to32(v)
case OpZeroExt16to64:
- return rewriteValueS390X_OpZeroExt16to64(v, config)
+ return rewriteValueS390X_OpZeroExt16to64(v)
case OpZeroExt32to64:
- return rewriteValueS390X_OpZeroExt32to64(v, config)
+ return rewriteValueS390X_OpZeroExt32to64(v)
case OpZeroExt8to16:
- return rewriteValueS390X_OpZeroExt8to16(v, config)
+ return rewriteValueS390X_OpZeroExt8to16(v)
case OpZeroExt8to32:
- return rewriteValueS390X_OpZeroExt8to32(v, config)
+ return rewriteValueS390X_OpZeroExt8to32(v)
case OpZeroExt8to64:
- return rewriteValueS390X_OpZeroExt8to64(v, config)
+ return rewriteValueS390X_OpZeroExt8to64(v)
}
return false
}
-func rewriteValueS390X_OpAdd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAdd16(v *Value) bool {
// match: (Add16 x y)
// cond:
// result: (ADDW x y)
return true
}
}
-func rewriteValueS390X_OpAdd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAdd32(v *Value) bool {
// match: (Add32 x y)
// cond:
// result: (ADDW x y)
return true
}
}
-func rewriteValueS390X_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAdd32F(v *Value) bool {
// match: (Add32F x y)
// cond:
// result: (FADDS x y)
return true
}
}
-func rewriteValueS390X_OpAdd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAdd64(v *Value) bool {
// match: (Add64 x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueS390X_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAdd64F(v *Value) bool {
// match: (Add64F x y)
// cond:
// result: (FADD x y)
return true
}
}
-func rewriteValueS390X_OpAdd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAdd8(v *Value) bool {
// match: (Add8 x y)
// cond:
// result: (ADDW x y)
return true
}
}
-func rewriteValueS390X_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAddPtr(v *Value) bool {
// match: (AddPtr x y)
// cond:
// result: (ADD x y)
return true
}
}
-func rewriteValueS390X_OpAddr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAddr(v *Value) bool {
// match: (Addr {sym} base)
// cond:
// result: (MOVDaddr {sym} base)
return true
}
}
-func rewriteValueS390X_OpAnd16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAnd16(v *Value) bool {
// match: (And16 x y)
// cond:
// result: (ANDW x y)
return true
}
}
-func rewriteValueS390X_OpAnd32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAnd32(v *Value) bool {
// match: (And32 x y)
// cond:
// result: (ANDW x y)
return true
}
}
-func rewriteValueS390X_OpAnd64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAnd64(v *Value) bool {
// match: (And64 x y)
// cond:
// result: (AND x y)
return true
}
}
-func rewriteValueS390X_OpAnd8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAnd8(v *Value) bool {
// match: (And8 x y)
// cond:
// result: (ANDW x y)
return true
}
}
-func rewriteValueS390X_OpAndB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAndB(v *Value) bool {
// match: (AndB x y)
// cond:
// result: (ANDW x y)
return true
}
}
-func rewriteValueS390X_OpAtomicAdd32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpAtomicAdd32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (AtomicAdd32 ptr val mem)
// cond:
// result: (AddTupleFirst32 (LAA ptr val mem) val)
val := v.Args[1]
mem := v.Args[2]
v.reset(OpS390XAddTupleFirst32)
- v0 := b.NewValue0(v.Pos, OpS390XLAA, MakeTuple(config.fe.TypeUInt32(), TypeMem))
+ v0 := b.NewValue0(v.Pos, OpS390XLAA, MakeTuple(fe.TypeUInt32(), TypeMem))
v0.AddArg(ptr)
v0.AddArg(val)
v0.AddArg(mem)
return true
}
}
-func rewriteValueS390X_OpAtomicAdd64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpAtomicAdd64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (AtomicAdd64 ptr val mem)
// cond:
// result: (AddTupleFirst64 (LAAG ptr val mem) val)
val := v.Args[1]
mem := v.Args[2]
v.reset(OpS390XAddTupleFirst64)
- v0 := b.NewValue0(v.Pos, OpS390XLAAG, MakeTuple(config.fe.TypeUInt64(), TypeMem))
+ v0 := b.NewValue0(v.Pos, OpS390XLAAG, MakeTuple(fe.TypeUInt64(), TypeMem))
v0.AddArg(ptr)
v0.AddArg(val)
v0.AddArg(mem)
return true
}
}
-func rewriteValueS390X_OpAtomicCompareAndSwap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAtomicCompareAndSwap32(v *Value) bool {
// match: (AtomicCompareAndSwap32 ptr old new_ mem)
// cond:
// result: (LoweredAtomicCas32 ptr old new_ mem)
return true
}
}
-func rewriteValueS390X_OpAtomicCompareAndSwap64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAtomicCompareAndSwap64(v *Value) bool {
// match: (AtomicCompareAndSwap64 ptr old new_ mem)
// cond:
// result: (LoweredAtomicCas64 ptr old new_ mem)
return true
}
}
-func rewriteValueS390X_OpAtomicExchange32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAtomicExchange32(v *Value) bool {
// match: (AtomicExchange32 ptr val mem)
// cond:
// result: (LoweredAtomicExchange32 ptr val mem)
return true
}
}
-func rewriteValueS390X_OpAtomicExchange64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAtomicExchange64(v *Value) bool {
// match: (AtomicExchange64 ptr val mem)
// cond:
// result: (LoweredAtomicExchange64 ptr val mem)
return true
}
}
-func rewriteValueS390X_OpAtomicLoad32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAtomicLoad32(v *Value) bool {
// match: (AtomicLoad32 ptr mem)
// cond:
// result: (MOVWZatomicload ptr mem)
return true
}
}
-func rewriteValueS390X_OpAtomicLoad64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAtomicLoad64(v *Value) bool {
// match: (AtomicLoad64 ptr mem)
// cond:
// result: (MOVDatomicload ptr mem)
return true
}
}
-func rewriteValueS390X_OpAtomicLoadPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAtomicLoadPtr(v *Value) bool {
// match: (AtomicLoadPtr ptr mem)
// cond:
// result: (MOVDatomicload ptr mem)
return true
}
}
-func rewriteValueS390X_OpAtomicStore32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAtomicStore32(v *Value) bool {
// match: (AtomicStore32 ptr val mem)
// cond:
// result: (MOVWatomicstore ptr val mem)
return true
}
}
-func rewriteValueS390X_OpAtomicStore64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAtomicStore64(v *Value) bool {
// match: (AtomicStore64 ptr val mem)
// cond:
// result: (MOVDatomicstore ptr val mem)
return true
}
}
-func rewriteValueS390X_OpAtomicStorePtrNoWB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpAtomicStorePtrNoWB(v *Value) bool {
// match: (AtomicStorePtrNoWB ptr val mem)
// cond:
// result: (MOVDatomicstore ptr val mem)
return true
}
}
-func rewriteValueS390X_OpAvg64u(v *Value, config *Config) bool {
+func rewriteValueS390X_OpAvg64u(v *Value) bool {
b := v.Block
_ = b
// match: (Avg64u <t> x y)
return true
}
}
-func rewriteValueS390X_OpBitLen64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpBitLen64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (BitLen64 x)
// cond:
// result: (SUB (MOVDconst [64]) (FLOGR x))
for {
x := v.Args[0]
v.reset(OpS390XSUB)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 64
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XFLOGR, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, fe.TypeUInt64())
v1.AddArg(x)
v.AddArg(v1)
return true
}
}
-func rewriteValueS390X_OpBswap32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpBswap32(v *Value) bool {
// match: (Bswap32 x)
// cond:
// result: (MOVWBR x)
return true
}
}
-func rewriteValueS390X_OpBswap64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpBswap64(v *Value) bool {
// match: (Bswap64 x)
// cond:
// result: (MOVDBR x)
return true
}
}
-func rewriteValueS390X_OpClosureCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpClosureCall(v *Value) bool {
// match: (ClosureCall [argwid] entry closure mem)
// cond:
// result: (CALLclosure [argwid] entry closure mem)
return true
}
}
-func rewriteValueS390X_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCom16(v *Value) bool {
// match: (Com16 x)
// cond:
// result: (NOTW x)
return true
}
}
-func rewriteValueS390X_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCom32(v *Value) bool {
// match: (Com32 x)
// cond:
// result: (NOTW x)
return true
}
}
-func rewriteValueS390X_OpCom64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCom64(v *Value) bool {
// match: (Com64 x)
// cond:
// result: (NOT x)
return true
}
}
-func rewriteValueS390X_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCom8(v *Value) bool {
// match: (Com8 x)
// cond:
// result: (NOTW x)
return true
}
}
-func rewriteValueS390X_OpConst16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpConst16(v *Value) bool {
// match: (Const16 [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValueS390X_OpConst32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpConst32(v *Value) bool {
// match: (Const32 [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValueS390X_OpConst32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpConst32F(v *Value) bool {
// match: (Const32F [val])
// cond:
// result: (FMOVSconst [val])
return true
}
}
-func rewriteValueS390X_OpConst64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpConst64(v *Value) bool {
// match: (Const64 [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValueS390X_OpConst64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpConst64F(v *Value) bool {
// match: (Const64F [val])
// cond:
// result: (FMOVDconst [val])
return true
}
}
-func rewriteValueS390X_OpConst8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpConst8(v *Value) bool {
// match: (Const8 [val])
// cond:
// result: (MOVDconst [val])
return true
}
}
-func rewriteValueS390X_OpConstBool(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpConstBool(v *Value) bool {
// match: (ConstBool [b])
// cond:
// result: (MOVDconst [b])
return true
}
}
-func rewriteValueS390X_OpConstNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpConstNil(v *Value) bool {
// match: (ConstNil)
// cond:
// result: (MOVDconst [0])
return true
}
}
-func rewriteValueS390X_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpConvert(v *Value) bool {
// match: (Convert <t> x mem)
// cond:
// result: (MOVDconvert <t> x mem)
return true
}
}
-func rewriteValueS390X_OpCtz32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpCtz32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Ctz32 <t> x)
// cond:
// result: (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))
t := v.Type
x := v.Args[0]
v.reset(OpS390XSUB)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 64
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XFLOGR, config.fe.TypeUInt64())
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, fe.TypeUInt64())
v3 := b.NewValue0(v.Pos, OpS390XANDW, t)
v4 := b.NewValue0(v.Pos, OpS390XSUBWconst, t)
v4.AuxInt = 1
return true
}
}
-func rewriteValueS390X_OpCtz64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpCtz64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Ctz64 <t> x)
// cond:
// result: (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
t := v.Type
x := v.Args[0]
v.reset(OpS390XSUB)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 64
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XFLOGR, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, fe.TypeUInt64())
v2 := b.NewValue0(v.Pos, OpS390XAND, t)
v3 := b.NewValue0(v.Pos, OpS390XSUBconst, t)
v3.AuxInt = 1
return true
}
}
-func rewriteValueS390X_OpCvt32Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCvt32Fto32(v *Value) bool {
// match: (Cvt32Fto32 x)
// cond:
// result: (CFEBRA x)
return true
}
}
-func rewriteValueS390X_OpCvt32Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCvt32Fto64(v *Value) bool {
// match: (Cvt32Fto64 x)
// cond:
// result: (CGEBRA x)
return true
}
}
-func rewriteValueS390X_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCvt32Fto64F(v *Value) bool {
// match: (Cvt32Fto64F x)
// cond:
// result: (LDEBR x)
return true
}
}
-func rewriteValueS390X_OpCvt32to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCvt32to32F(v *Value) bool {
// match: (Cvt32to32F x)
// cond:
// result: (CEFBRA x)
return true
}
}
-func rewriteValueS390X_OpCvt32to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCvt32to64F(v *Value) bool {
// match: (Cvt32to64F x)
// cond:
// result: (CDFBRA x)
return true
}
}
-func rewriteValueS390X_OpCvt64Fto32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCvt64Fto32(v *Value) bool {
// match: (Cvt64Fto32 x)
// cond:
// result: (CFDBRA x)
return true
}
}
-func rewriteValueS390X_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCvt64Fto32F(v *Value) bool {
// match: (Cvt64Fto32F x)
// cond:
// result: (LEDBR x)
return true
}
}
-func rewriteValueS390X_OpCvt64Fto64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCvt64Fto64(v *Value) bool {
// match: (Cvt64Fto64 x)
// cond:
// result: (CGDBRA x)
return true
}
}
-func rewriteValueS390X_OpCvt64to32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCvt64to32F(v *Value) bool {
// match: (Cvt64to32F x)
// cond:
// result: (CEGBRA x)
return true
}
}
-func rewriteValueS390X_OpCvt64to64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpCvt64to64F(v *Value) bool {
// match: (Cvt64to64F x)
// cond:
// result: (CDGBRA x)
return true
}
}
-func rewriteValueS390X_OpDiv16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpDiv16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16 x y)
// cond:
// result: (DIVW (MOVHreg x) (MOVHreg y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XDIVW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueS390X_OpDiv16u(v *Value, config *Config) bool {
+func rewriteValueS390X_OpDiv16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16u x y)
// cond:
// result: (DIVWU (MOVHZreg x) (MOVHZreg y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XDIVWU)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueS390X_OpDiv32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpDiv32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div32 x y)
// cond:
// result: (DIVW (MOVWreg x) y)
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XDIVW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
return true
}
}
-func rewriteValueS390X_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpDiv32F(v *Value) bool {
// match: (Div32F x y)
// cond:
// result: (FDIVS x y)
return true
}
}
-func rewriteValueS390X_OpDiv32u(v *Value, config *Config) bool {
+func rewriteValueS390X_OpDiv32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div32u x y)
// cond:
// result: (DIVWU (MOVWZreg x) y)
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XDIVWU)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
return true
}
}
-func rewriteValueS390X_OpDiv64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpDiv64(v *Value) bool {
// match: (Div64 x y)
// cond:
// result: (DIVD x y)
return true
}
}
-func rewriteValueS390X_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpDiv64F(v *Value) bool {
// match: (Div64F x y)
// cond:
// result: (FDIV x y)
return true
}
}
-func rewriteValueS390X_OpDiv64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpDiv64u(v *Value) bool {
// match: (Div64u x y)
// cond:
// result: (DIVDU x y)
return true
}
}
-func rewriteValueS390X_OpDiv8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpDiv8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8 x y)
// cond:
// result: (DIVW (MOVBreg x) (MOVBreg y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XDIVW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueS390X_OpDiv8u(v *Value, config *Config) bool {
+func rewriteValueS390X_OpDiv8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8u x y)
// cond:
// result: (DIVWU (MOVBZreg x) (MOVBZreg y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XDIVWU)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueS390X_OpEq16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpEq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq16 x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpEq32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpEq32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq32 x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPW, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpEq32F(v *Value, config *Config) bool {
+func rewriteValueS390X_OpEq32F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq32F x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XFCMPS, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpEq64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpEq64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq64 x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpEq64F(v *Value, config *Config) bool {
+func rewriteValueS390X_OpEq64F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq64F x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XFCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpEq8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpEq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq8 x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpEqB(v *Value, config *Config) bool {
+func rewriteValueS390X_OpEqB(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (EqB x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpEqPtr(v *Value, config *Config) bool {
+func rewriteValueS390X_OpEqPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (EqPtr x y)
// cond:
// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDEQ)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGeq16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq16 x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpGeq16U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGeq16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq16U x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpGeq32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGeq32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq32 x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPW, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGeq32F(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGeq32F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq32F x y)
// cond:
// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGEnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XFCMPS, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGeq32U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGeq32U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq32U x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPWU, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGeq64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGeq64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq64 x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGeq64F(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGeq64F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq64F x y)
// cond:
// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGEnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XFCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGeq64U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGeq64U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq64U x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGeq8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq8 x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpGeq8U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGeq8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq8U x y)
// cond:
// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpGetClosurePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpGetClosurePtr(v *Value) bool {
// match: (GetClosurePtr)
// cond:
// result: (LoweredGetClosurePtr)
return true
}
}
-func rewriteValueS390X_OpGetG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpGetG(v *Value) bool {
// match: (GetG mem)
// cond:
// result: (LoweredGetG mem)
return true
}
}
-func rewriteValueS390X_OpGreater16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGreater16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater16 x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpGreater16U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGreater16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater16U x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpGreater32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGreater32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater32 x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPW, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGreater32F(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGreater32F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater32F x y)
// cond:
// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGTnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XFCMPS, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGreater32U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGreater32U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater32U x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPWU, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGreater64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGreater64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater64 x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGreater64F(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGreater64F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater64F x y)
// cond:
// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGTnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XFCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGreater64U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGreater64U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater64U x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpGreater8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGreater8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater8 x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpGreater8U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpGreater8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater8U x y)
// cond:
// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpHmul32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpHmul32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Hmul32 x y)
// cond:
// result: (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
y := v.Args[1]
v.reset(OpS390XSRDconst)
v.AuxInt = 32
- v0 := b.NewValue0(v.Pos, OpS390XMULLD, config.fe.TypeInt64())
- v1 := b.NewValue0(v.Pos, OpS390XMOVWreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMULLD, fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWreg, fe.TypeInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XMOVWreg, config.fe.TypeInt64())
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWreg, fe.TypeInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueS390X_OpHmul32u(v *Value, config *Config) bool {
+func rewriteValueS390X_OpHmul32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Hmul32u x y)
// cond:
// result: (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
y := v.Args[1]
v.reset(OpS390XSRDconst)
v.AuxInt = 32
- v0 := b.NewValue0(v.Pos, OpS390XMULLD, config.fe.TypeInt64())
- v1 := b.NewValue0(v.Pos, OpS390XMOVWZreg, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMULLD, fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWZreg, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, fe.TypeUInt64())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
return true
}
}
-func rewriteValueS390X_OpHmul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpHmul64(v *Value) bool {
// match: (Hmul64 x y)
// cond:
// result: (MULHD x y)
return true
}
}
-func rewriteValueS390X_OpHmul64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpHmul64u(v *Value) bool {
// match: (Hmul64u x y)
// cond:
// result: (MULHDU x y)
return true
}
}
-func rewriteValueS390X_OpITab(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpITab(v *Value) bool {
// match: (ITab (Load ptr mem))
// cond:
// result: (MOVDload ptr mem)
}
return false
}
-func rewriteValueS390X_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpInterCall(v *Value) bool {
// match: (InterCall [argwid] entry mem)
// cond:
// result: (CALLinter [argwid] entry mem)
return true
}
}
-func rewriteValueS390X_OpIsInBounds(v *Value, config *Config) bool {
+func rewriteValueS390X_OpIsInBounds(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (IsInBounds idx len)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
idx := v.Args[0]
len := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpIsNonNil(v *Value, config *Config) bool {
+func rewriteValueS390X_OpIsNonNil(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (IsNonNil p)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
for {
p := v.Args[0]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPconst, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpIsSliceInBounds(v *Value, config *Config) bool {
+func rewriteValueS390X_OpIsSliceInBounds(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (IsSliceInBounds idx len)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
idx := v.Args[0]
len := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLeq16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq16 x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLeq16U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLeq16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq16U x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLeq32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLeq32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq32 x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPW, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLeq32F(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLeq32F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq32F x y)
// cond:
// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGEnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XFCMPS, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLeq32U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLeq32U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq32U x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPWU, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLeq64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLeq64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq64 x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLeq64F(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLeq64F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq64F x y)
// cond:
// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGEnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XFCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLeq64U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLeq64U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq64U x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLeq8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq8 x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLeq8U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLeq8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq8U x y)
// cond:
// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLess16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLess16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less16 x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLess16U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLess16U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less16U x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLess32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLess32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less32 x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPW, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLess32F(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLess32F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less32F x y)
// cond:
// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGTnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XFCMPS, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLess32U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLess32U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less32U x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPWU, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLess64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLess64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less64 x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLess64F(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLess64F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less64F x y)
// cond:
// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDGTnoinv)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XFCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLess64U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLess64U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less64U x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpLess8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLess8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less8 x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLess8U(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLess8U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less8U x y)
// cond:
// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDLT)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPU, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLoad(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpLoad(v *Value) bool {
// match: (Load <t> ptr mem)
// cond: (is64BitInt(t) || isPtr(t))
// result: (MOVDload ptr mem)
}
return false
}
-func rewriteValueS390X_OpLsh16x16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh16x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x16 <t> x y)
// cond:
// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh16x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x32 <t> x y)
return true
}
}
-func rewriteValueS390X_OpLsh16x64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh16x64(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x64 <t> x y)
return true
}
}
-func rewriteValueS390X_OpLsh16x8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh16x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x8 <t> x y)
// cond:
// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLsh32x16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh32x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x16 <t> x y)
// cond:
// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLsh32x32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh32x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x32 <t> x y)
return true
}
}
-func rewriteValueS390X_OpLsh32x64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh32x64(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x64 <t> x y)
return true
}
}
-func rewriteValueS390X_OpLsh32x8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh32x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x8 <t> x y)
// cond:
// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLsh64x16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh64x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x16 <t> x y)
// cond:
// result: (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
v1 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, t)
v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v2.AuxInt = 63
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLsh64x32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh64x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x32 <t> x y)
return true
}
}
-func rewriteValueS390X_OpLsh64x64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh64x64(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x64 <t> x y)
return true
}
}
-func rewriteValueS390X_OpLsh64x8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh64x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x8 <t> x y)
// cond:
// result: (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))
v1 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, t)
v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v2.AuxInt = 63
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh8x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x16 <t> x y)
// cond:
// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpLsh8x32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh8x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x32 <t> x y)
return true
}
}
-func rewriteValueS390X_OpLsh8x64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh8x64(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x64 <t> x y)
return true
}
}
-func rewriteValueS390X_OpLsh8x8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpLsh8x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x8 <t> x y)
// cond:
// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpMod16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpMod16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16 x y)
// cond:
// result: (MODW (MOVHreg x) (MOVHreg y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMODW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueS390X_OpMod16u(v *Value, config *Config) bool {
+func rewriteValueS390X_OpMod16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod16u x y)
// cond:
// result: (MODWU (MOVHZreg x) (MOVHZreg y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMODWU)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueS390X_OpMod32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpMod32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod32 x y)
// cond:
// result: (MODW (MOVWreg x) y)
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMODW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
return true
}
}
-func rewriteValueS390X_OpMod32u(v *Value, config *Config) bool {
+func rewriteValueS390X_OpMod32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod32u x y)
// cond:
// result: (MODWU (MOVWZreg x) y)
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMODWU)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(y)
return true
}
}
-func rewriteValueS390X_OpMod64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpMod64(v *Value) bool {
// match: (Mod64 x y)
// cond:
// result: (MODD x y)
return true
}
}
-func rewriteValueS390X_OpMod64u(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpMod64u(v *Value) bool {
// match: (Mod64u x y)
// cond:
// result: (MODDU x y)
return true
}
}
-func rewriteValueS390X_OpMod8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpMod8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8 x y)
// cond:
// result: (MODW (MOVBreg x) (MOVBreg y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMODW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueS390X_OpMod8u(v *Value, config *Config) bool {
+func rewriteValueS390X_OpMod8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mod8u x y)
// cond:
// result: (MODWU (MOVBZreg x) (MOVBZreg y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMODWU)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValueS390X_OpMove(v *Value, config *Config) bool {
+func rewriteValueS390X_OpMove(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Move [0] _ _ mem)
// cond:
// result: mem
mem := v.Args[2]
v.reset(OpS390XMOVBstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, fe.TypeUInt8())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
mem := v.Args[2]
v.reset(OpS390XMOVHstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, fe.TypeUInt16())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
mem := v.Args[2]
v.reset(OpS390XMOVWstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, fe.TypeUInt32())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
mem := v.Args[2]
v.reset(OpS390XMOVDstore)
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, fe.TypeUInt64())
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v.reset(OpS390XMOVDstore)
v.AuxInt = 8
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, fe.TypeUInt64())
v0.AuxInt = 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpS390XMOVDload, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDload, fe.TypeUInt64())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpS390XMOVDstore)
v.AuxInt = 16
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, fe.TypeUInt64())
v0.AuxInt = 16
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, TypeMem)
v1.AuxInt = 8
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpS390XMOVDload, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDload, fe.TypeUInt64())
v2.AuxInt = 8
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpS390XMOVDstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpS390XMOVDload, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVDload, fe.TypeUInt64())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
v.reset(OpS390XMOVBstore)
v.AuxInt = 2
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, fe.TypeUInt8())
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, config.fe.TypeUInt16())
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, fe.TypeUInt16())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpS390XMOVBstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, fe.TypeUInt8())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpS390XMOVHstore)
v.AuxInt = 4
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, fe.TypeUInt16())
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, TypeMem)
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, fe.TypeUInt32())
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v.reset(OpS390XMOVBstore)
v.AuxInt = 6
v.AddArg(dst)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, fe.TypeUInt8())
v0.AuxInt = 6
v0.AddArg(src)
v0.AddArg(mem)
v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
- v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, config.fe.TypeUInt16())
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, fe.TypeUInt16())
v2.AuxInt = 4
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpS390XMOVWstore, TypeMem)
v3.AddArg(dst)
- v4 := b.NewValue0(v.Pos, OpS390XMOVWZload, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVWZload, fe.TypeUInt32())
v4.AddArg(src)
v4.AddArg(mem)
v3.AddArg(v4)
}
return false
}
-func rewriteValueS390X_OpMul16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpMul16(v *Value) bool {
// match: (Mul16 x y)
// cond:
// result: (MULLW x y)
return true
}
}
-func rewriteValueS390X_OpMul32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpMul32(v *Value) bool {
// match: (Mul32 x y)
// cond:
// result: (MULLW x y)
return true
}
}
-func rewriteValueS390X_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpMul32F(v *Value) bool {
// match: (Mul32F x y)
// cond:
// result: (FMULS x y)
return true
}
}
-func rewriteValueS390X_OpMul64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpMul64(v *Value) bool {
// match: (Mul64 x y)
// cond:
// result: (MULLD x y)
return true
}
}
-func rewriteValueS390X_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpMul64F(v *Value) bool {
// match: (Mul64F x y)
// cond:
// result: (FMUL x y)
return true
}
}
-func rewriteValueS390X_OpMul8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpMul8(v *Value) bool {
// match: (Mul8 x y)
// cond:
// result: (MULLW x y)
return true
}
}
-func rewriteValueS390X_OpNeg16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpNeg16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neg16 x)
// cond:
// result: (NEGW (MOVHreg x))
for {
x := v.Args[0]
v.reset(OpS390XNEGW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValueS390X_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpNeg32(v *Value) bool {
// match: (Neg32 x)
// cond:
// result: (NEGW x)
return true
}
}
-func rewriteValueS390X_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpNeg32F(v *Value) bool {
// match: (Neg32F x)
// cond:
// result: (FNEGS x)
return true
}
}
-func rewriteValueS390X_OpNeg64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpNeg64(v *Value) bool {
// match: (Neg64 x)
// cond:
// result: (NEG x)
return true
}
}
-func rewriteValueS390X_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpNeg64F(v *Value) bool {
// match: (Neg64F x)
// cond:
// result: (FNEG x)
return true
}
}
-func rewriteValueS390X_OpNeg8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpNeg8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neg8 x)
// cond:
// result: (NEGW (MOVBreg x))
for {
x := v.Args[0]
v.reset(OpS390XNEGW)
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValueS390X_OpNeq16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpNeq16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq16 x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpNeq32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpNeq32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq32 x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMPW, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpNeq32F(v *Value, config *Config) bool {
+func rewriteValueS390X_OpNeq32F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq32F x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XFCMPS, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpNeq64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpNeq64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq64 x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpNeq64F(v *Value, config *Config) bool {
+func rewriteValueS390X_OpNeq64F(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq64F x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XFCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpNeq8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpNeq8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq8 x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpNeqB(v *Value, config *Config) bool {
+func rewriteValueS390X_OpNeqB(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (NeqB x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
- v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v4.AddArg(y)
v2.AddArg(v4)
v.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValueS390X_OpNeqPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (NeqPtr x y)
// cond:
// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpS390XMOVDNE)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = 0
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v1.AuxInt = 1
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpS390XCMP, TypeFlags)
return true
}
}
-func rewriteValueS390X_OpNilCheck(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpNilCheck(v *Value) bool {
// match: (NilCheck ptr mem)
// cond:
// result: (LoweredNilCheck ptr mem)
return true
}
}
-func rewriteValueS390X_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpNot(v *Value) bool {
// match: (Not x)
// cond:
// result: (XORWconst [1] x)
return true
}
}
-func rewriteValueS390X_OpOffPtr(v *Value, config *Config) bool {
+func rewriteValueS390X_OpOffPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (OffPtr [off] ptr:(SP))
// cond:
// result: (MOVDaddr [off] ptr)
off := v.AuxInt
ptr := v.Args[0]
v.reset(OpS390XADD)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = off
v.AddArg(v0)
v.AddArg(ptr)
return true
}
}
-func rewriteValueS390X_OpOr16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpOr16(v *Value) bool {
// match: (Or16 x y)
// cond:
// result: (ORW x y)
return true
}
}
-func rewriteValueS390X_OpOr32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpOr32(v *Value) bool {
// match: (Or32 x y)
// cond:
// result: (ORW x y)
return true
}
}
-func rewriteValueS390X_OpOr64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpOr64(v *Value) bool {
// match: (Or64 x y)
// cond:
// result: (OR x y)
return true
}
}
-func rewriteValueS390X_OpOr8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpOr8(v *Value) bool {
// match: (Or8 x y)
// cond:
// result: (ORW x y)
return true
}
}
-func rewriteValueS390X_OpOrB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpOrB(v *Value) bool {
// match: (OrB x y)
// cond:
// result: (ORW x y)
return true
}
}
-func rewriteValueS390X_OpRound32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpRound32F(v *Value) bool {
// match: (Round32F x)
// cond:
// result: (LoweredRound32F x)
return true
}
}
-func rewriteValueS390X_OpRound64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpRound64F(v *Value) bool {
// match: (Round64F x)
// cond:
// result: (LoweredRound64F x)
return true
}
}
-func rewriteValueS390X_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh16Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux16 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [15])))
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v3.AuxInt = 15
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValueS390X_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh16Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux32 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [15])))
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
return true
}
}
-func rewriteValueS390X_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh16Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux64 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [15])))
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
return true
}
}
-func rewriteValueS390X_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh16Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux8 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [15])))
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v3.AuxInt = 15
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValueS390X_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh16x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x16 <t> x y)
// cond:
// result: (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [15])))))
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XORW, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v4.AuxInt = 15
- v5 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v3.AddArg(v4)
return true
}
}
-func rewriteValueS390X_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh16x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x32 <t> x y)
// cond:
// result: (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [15])))))
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XORW, y.Type)
return true
}
}
-func rewriteValueS390X_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh16x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x64 <t> x y)
// cond:
// result: (SRAW <t> (MOVHreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [15])))))
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XOR, y.Type)
return true
}
}
-func rewriteValueS390X_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh16x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x8 <t> x y)
// cond:
// result: (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [15])))))
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XORW, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v4.AuxInt = 15
- v5 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v3.AddArg(v4)
return true
}
}
-func rewriteValueS390X_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh32Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux16 <t> x y)
// cond:
// result: (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh32Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux32 <t> x y)
return true
}
}
-func rewriteValueS390X_OpRsh32Ux64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh32Ux64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux64 <t> x y)
return true
}
}
-func rewriteValueS390X_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh32Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux8 <t> x y)
// cond:
// result: (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
v1 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v2.AuxInt = 31
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh32x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x16 <t> x y)
// cond:
// result: (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [31])))))
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v3.AuxInt = 31
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValueS390X_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh32x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x32 <t> x y)
return true
}
}
-func rewriteValueS390X_OpRsh32x64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh32x64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x64 <t> x y)
return true
}
}
-func rewriteValueS390X_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh32x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x8 <t> x y)
// cond:
// result: (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [31])))))
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v3.AuxInt = 31
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValueS390X_OpRsh64Ux16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh64Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux16 <t> x y)
// cond:
// result: (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
v1 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, t)
v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v2.AuxInt = 63
- v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpRsh64Ux32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh64Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux32 <t> x y)
return true
}
}
-func rewriteValueS390X_OpRsh64Ux64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh64Ux64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux64 <t> x y)
return true
}
}
-func rewriteValueS390X_OpRsh64Ux8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh64Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux8 <t> x y)
// cond:
// result: (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))
v1 := b.NewValue0(v.Pos, OpS390XSUBEcarrymask, t)
v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v2.AuxInt = 63
- v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
return true
}
}
-func rewriteValueS390X_OpRsh64x16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh64x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x16 <t> x y)
// cond:
// result: (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [63])))))
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v3.AuxInt = 63
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValueS390X_OpRsh64x32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh64x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x32 <t> x y)
return true
}
}
-func rewriteValueS390X_OpRsh64x64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh64x64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x64 <t> x y)
return true
}
}
-func rewriteValueS390X_OpRsh64x8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh64x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x8 <t> x y)
// cond:
// result: (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [63])))))
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v3.AuxInt = 63
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValueS390X_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh8Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux16 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [7])))
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v3.AuxInt = 7
- v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValueS390X_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh8Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux32 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [7])))
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
return true
}
}
-func rewriteValueS390X_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh8Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux64 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [7])))
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
return true
}
}
-func rewriteValueS390X_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh8Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux8 <t> x y)
// cond:
// result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [7])))
y := v.Args[1]
v.reset(OpS390XANDW)
v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v1.AddArg(x)
v0.AddArg(v1)
v0.AddArg(y)
v2 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, t)
v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v3.AuxInt = 7
- v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v4.AddArg(y)
v3.AddArg(v4)
v2.AddArg(v3)
return true
}
}
-func rewriteValueS390X_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh8x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x16 <t> x y)
// cond:
// result: (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [7])))))
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XORW, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v4.AuxInt = 7
- v5 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v3.AddArg(v4)
return true
}
}
-func rewriteValueS390X_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh8x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x32 <t> x y)
// cond:
// result: (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [7])))))
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XORW, y.Type)
return true
}
}
-func rewriteValueS390X_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh8x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x64 <t> x y)
// cond:
// result: (SRAW <t> (MOVBreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [7])))))
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XOR, y.Type)
return true
}
}
-func rewriteValueS390X_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValueS390X_OpRsh8x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x8 <t> x y)
// cond:
// result: (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [7])))))
y := v.Args[1]
v.reset(OpS390XSRAW)
v.Type = t
- v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, config.fe.TypeInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, fe.TypeInt64())
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XORW, y.Type)
v3 := b.NewValue0(v.Pos, OpS390XSUBEWcarrymask, y.Type)
v4 := b.NewValue0(v.Pos, OpS390XCMPWUconst, TypeFlags)
v4.AuxInt = 7
- v5 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeUInt64())
v5.AddArg(y)
v4.AddArg(v5)
v3.AddArg(v4)
return true
}
}
-func rewriteValueS390X_OpS390XADD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XADD(v *Value) bool {
// match: (ADD x (MOVDconst [c]))
// cond: is32Bit(c)
// result: (ADDconst [c] x)
}
return false
}
-func rewriteValueS390X_OpS390XADDW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XADDW(v *Value) bool {
// match: (ADDW x (MOVDconst [c]))
// cond:
// result: (ADDWconst [c] x)
}
return false
}
-func rewriteValueS390X_OpS390XADDWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XADDWconst(v *Value) bool {
// match: (ADDWconst [c] x)
// cond: int32(c)==0
// result: x
}
return false
}
-func rewriteValueS390X_OpS390XADDconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XADDconst(v *Value) bool {
// match: (ADDconst [c] (MOVDaddr [d] {s} x:(SB)))
// cond: ((c+d)&1 == 0) && is32Bit(c+d)
// result: (MOVDaddr [c+d] {s} x)
}
return false
}
-func rewriteValueS390X_OpS390XAND(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XAND(v *Value) bool {
// match: (AND x (MOVDconst [c]))
// cond: is32Bit(c) && c < 0
// result: (ANDconst [c] x)
}
return false
}
-func rewriteValueS390X_OpS390XANDW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XANDW(v *Value) bool {
// match: (ANDW x (MOVDconst [c]))
// cond:
// result: (ANDWconst [c] x)
}
return false
}
-func rewriteValueS390X_OpS390XANDWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XANDWconst(v *Value) bool {
// match: (ANDWconst [c] (ANDWconst [d] x))
// cond:
// result: (ANDWconst [c & d] x)
}
return false
}
-func rewriteValueS390X_OpS390XANDconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XANDconst(v *Value) bool {
// match: (ANDconst [c] (ANDconst [d] x))
// cond:
// result: (ANDconst [c & d] x)
}
return false
}
-func rewriteValueS390X_OpS390XCMP(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XCMP(v *Value) bool {
b := v.Block
_ = b
// match: (CMP x (MOVDconst [c]))
}
return false
}
-func rewriteValueS390X_OpS390XCMPU(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XCMPU(v *Value) bool {
b := v.Block
_ = b
// match: (CMPU x (MOVDconst [c]))
}
return false
}
-func rewriteValueS390X_OpS390XCMPUconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XCMPUconst(v *Value) bool {
// match: (CMPUconst (MOVDconst [x]) [y])
// cond: uint64(x)==uint64(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValueS390X_OpS390XCMPW(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XCMPW(v *Value) bool {
b := v.Block
_ = b
// match: (CMPW x (MOVDconst [c]))
}
return false
}
-func rewriteValueS390X_OpS390XCMPWU(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XCMPWU(v *Value) bool {
b := v.Block
_ = b
// match: (CMPWU x (MOVDconst [c]))
}
return false
}
-func rewriteValueS390X_OpS390XCMPWUconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XCMPWUconst(v *Value) bool {
// match: (CMPWUconst (MOVDconst [x]) [y])
// cond: uint32(x)==uint32(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValueS390X_OpS390XCMPWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XCMPWconst(v *Value) bool {
// match: (CMPWconst (MOVDconst [x]) [y])
// cond: int32(x)==int32(y)
// result: (FlagEQ)
}
return false
}
-func rewriteValueS390X_OpS390XCMPconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XCMPconst(v *Value) bool {
// match: (CMPconst (MOVDconst [x]) [y])
// cond: x==y
// result: (FlagEQ)
}
return false
}
-func rewriteValueS390X_OpS390XFADD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XFADD(v *Value) bool {
// match: (FADD x (FMUL y z))
// cond:
// result: (FMADD x y z)
}
return false
}
-func rewriteValueS390X_OpS390XFADDS(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XFADDS(v *Value) bool {
// match: (FADDS x (FMULS y z))
// cond:
// result: (FMADDS x y z)
}
return false
}
-func rewriteValueS390X_OpS390XFMOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool {
// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is20Bit(off1+off2)
// result: (FMOVDload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueS390X_OpS390XFMOVDloadidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XFMOVDloadidx(v *Value) bool {
// match: (FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
// cond:
// result: (FMOVDloadidx [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValueS390X_OpS390XFMOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool {
// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is20Bit(off1+off2)
// result: (FMOVDstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueS390X_OpS390XFMOVDstoreidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XFMOVDstoreidx(v *Value) bool {
// match: (FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
// cond:
// result: (FMOVDstoreidx [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueS390X_OpS390XFMOVSload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool {
// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is20Bit(off1+off2)
// result: (FMOVSload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueS390X_OpS390XFMOVSloadidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XFMOVSloadidx(v *Value) bool {
// match: (FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
// cond:
// result: (FMOVSloadidx [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValueS390X_OpS390XFMOVSstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool {
// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is20Bit(off1+off2)
// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueS390X_OpS390XFMOVSstoreidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XFMOVSstoreidx(v *Value) bool {
// match: (FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
// cond:
// result: (FMOVSstoreidx [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueS390X_OpS390XFSUB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XFSUB(v *Value) bool {
// match: (FSUB (FMUL y z) x)
// cond:
// result: (FMSUB x y z)
}
return false
}
-func rewriteValueS390X_OpS390XFSUBS(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XFSUBS(v *Value) bool {
// match: (FSUBS (FMULS y z) x)
// cond:
// result: (FMSUBS x y z)
}
return false
}
-func rewriteValueS390X_OpS390XLoweredRound32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XLoweredRound32F(v *Value) bool {
// match: (LoweredRound32F x:(FMOVSconst))
// cond:
// result: x
}
return false
}
-func rewriteValueS390X_OpS390XLoweredRound64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XLoweredRound64F(v *Value) bool {
// match: (LoweredRound64F x:(FMOVDconst))
// cond:
// result: x
}
return false
}
-func rewriteValueS390X_OpS390XMOVBZload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool {
// match: (MOVBZload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVBZreg x)
}
return false
}
-func rewriteValueS390X_OpS390XMOVBZloadidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVBZloadidx(v *Value) bool {
// match: (MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
// cond:
// result: (MOVBZloadidx [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVBZreg(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool {
b := v.Block
_ = b
// match: (MOVBZreg x:(MOVDLT (MOVDconst [c]) (MOVDconst [d]) _))
}
return false
}
-func rewriteValueS390X_OpS390XMOVBload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVBload(v *Value) bool {
// match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is20Bit(off1+off2)
// result: (MOVBload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVBreg(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool {
b := v.Block
_ = b
// match: (MOVBreg x:(MOVBload _ _))
}
return false
}
-func rewriteValueS390X_OpS390XMOVBstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool {
// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr x mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool {
// match: (MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool {
// match: (MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
// cond:
// result: (MOVBstoreidx [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDEQ(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVDEQ(v *Value) bool {
// match: (MOVDEQ x y (InvertFlags cmp))
// cond:
// result: (MOVDEQ x y cmp)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDGE(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVDGE(v *Value) bool {
// match: (MOVDGE x y (InvertFlags cmp))
// cond:
// result: (MOVDLE x y cmp)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDGT(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVDGT(v *Value) bool {
// match: (MOVDGT x y (InvertFlags cmp))
// cond:
// result: (MOVDLT x y cmp)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDLE(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVDLE(v *Value) bool {
// match: (MOVDLE x y (InvertFlags cmp))
// cond:
// result: (MOVDGE x y cmp)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDLT(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVDLT(v *Value) bool {
// match: (MOVDLT x y (InvertFlags cmp))
// cond:
// result: (MOVDGT x y cmp)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDNE(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVDNE(v *Value) bool {
// match: (MOVDNE x y (InvertFlags cmp))
// cond:
// result: (MOVDNE x y cmp)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDaddridx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool {
// match: (MOVDaddridx [c] {s} (ADDconst [d] x) y)
// cond: is20Bit(c+d) && x.Op != OpSB
// result: (MOVDaddridx [c+d] {s} x y)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVDload(v *Value) bool {
// match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVDreg x)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDloadidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVDloadidx(v *Value) bool {
// match: (MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
// cond:
// result: (MOVDloadidx [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDnop(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XMOVDnop(v *Value) bool {
b := v.Block
_ = b
// match: (MOVDnop <t> x)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDreg(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XMOVDreg(v *Value) bool {
b := v.Block
_ = b
// match: (MOVDreg <t> x)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool {
// match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is20Bit(off1+off2)
// result: (MOVDstore [off1+off2] {sym} ptr val mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool {
// match: (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value) bool {
// match: (MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
// cond:
// result: (MOVDstoreidx [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVHBRstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool {
// match: (MOVHBRstore [i] {s} p (SRDconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWBRstore [i-2] {s} p w mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool {
// match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVWBRstoreidx [i-2] {s} p idx w mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVHZload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool {
// match: (MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVHZreg x)
}
return false
}
-func rewriteValueS390X_OpS390XMOVHZloadidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVHZloadidx(v *Value) bool {
// match: (MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
// cond:
// result: (MOVHZloadidx [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVHZreg(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool {
b := v.Block
_ = b
// match: (MOVHZreg x:(MOVBZload _ _))
}
return false
}
-func rewriteValueS390X_OpS390XMOVHload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVHload(v *Value) bool {
// match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is20Bit(off1+off2)
// result: (MOVHload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVHreg(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool {
b := v.Block
_ = b
// match: (MOVHreg x:(MOVBload _ _))
}
return false
}
-func rewriteValueS390X_OpS390XMOVHstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool {
// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
// cond:
// result: (MOVHstore [off] {sym} ptr x mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool {
// match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool {
// match: (MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
// cond:
// result: (MOVHstoreidx [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVWBRstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool {
// match: (MOVWBRstore [i] {s} p (SRDconst [32] w) x:(MOVWBRstore [i-4] {s} p w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVDBRstore [i-4] {s} p w mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value) bool {
// match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} p idx w mem))
// cond: x.Uses == 1 && clobber(x)
// result: (MOVDBRstoreidx [i-4] {s} p idx w mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVWZload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool {
// match: (MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVWZreg x)
}
return false
}
-func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value) bool {
// match: (MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
// cond:
// result: (MOVWZloadidx [c+d] {sym} ptr idx mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVWZreg(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool {
b := v.Block
_ = b
// match: (MOVWZreg x:(MOVBZload _ _))
}
return false
}
-func rewriteValueS390X_OpS390XMOVWload(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVWload(v *Value) bool {
// match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is20Bit(off1+off2)
// result: (MOVWload [off1+off2] {sym} ptr mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVWreg(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool {
b := v.Block
_ = b
// match: (MOVWreg x:(MOVBload _ _))
}
return false
}
-func rewriteValueS390X_OpS390XMOVWstore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool {
// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
// cond:
// result: (MOVWstore [off] {sym} ptr x mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
v.AuxInt = ValAndOff(a).Off()
v.Aux = s
v.AddArg(p)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32
v.AddArg(v0)
v.AddArg(mem)
}
return false
}
-func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool {
// match: (MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
// cond:
// result: (MOVWstoreidx [c+d] {sym} ptr idx val mem)
}
return false
}
-func rewriteValueS390X_OpS390XMULLD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMULLD(v *Value) bool {
// match: (MULLD x (MOVDconst [c]))
// cond: is32Bit(c)
// result: (MULLDconst [c] x)
}
return false
}
-func rewriteValueS390X_OpS390XMULLDconst(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool {
b := v.Block
_ = b
// match: (MULLDconst [-1] x)
}
return false
}
-func rewriteValueS390X_OpS390XMULLW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XMULLW(v *Value) bool {
// match: (MULLW x (MOVDconst [c]))
// cond:
// result: (MULLWconst [c] x)
}
return false
}
-func rewriteValueS390X_OpS390XMULLWconst(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool {
b := v.Block
_ = b
// match: (MULLWconst [-1] x)
}
return false
}
-func rewriteValueS390X_OpS390XNEG(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XNEG(v *Value) bool {
// match: (NEG (MOVDconst [c]))
// cond:
// result: (MOVDconst [-c])
}
return false
}
-func rewriteValueS390X_OpS390XNEGW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XNEGW(v *Value) bool {
// match: (NEGW (MOVDconst [c]))
// cond:
// result: (MOVDconst [int64(int32(-c))])
}
return false
}
-func rewriteValueS390X_OpS390XNOT(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XNOT(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (NOT x)
// cond: true
// result: (XOR (MOVDconst [-1]) x)
break
}
v.reset(OpS390XXOR)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, fe.TypeUInt64())
v0.AuxInt = -1
v.AddArg(v0)
v.AddArg(x)
}
return false
}
-func rewriteValueS390X_OpS390XNOTW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XNOTW(v *Value) bool {
// match: (NOTW x)
// cond: true
// result: (XORWconst [-1] x)
}
return false
}
-func rewriteValueS390X_OpS390XOR(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XOR(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (OR x (MOVDconst [c]))
// cond: isU32Bit(c)
// result: (ORconst [c] x)
break
}
b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDBRload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDBRload, fe.TypeUInt64())
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i
break
}
b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDload, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, fe.TypeUInt64())
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i - 7
}
return false
}
-func rewriteValueS390X_OpS390XORW(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XORW(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (ORW x (MOVDconst [c]))
// cond:
// result: (ORWconst [c] x)
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v.reset(OpCopy)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, config.fe.TypeUInt16())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, fe.TypeUInt16())
v1.AuxInt = i
v1.Aux = s
v1.AddArg(p)
break
}
b = mergePoint(b, x0, x1, x2)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWBRload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWBRload, fe.TypeUInt32())
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, fe.TypeUInt64())
v.reset(OpCopy)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, v.Type)
break
}
b = mergePoint(b, x0, x1, x2)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, fe.TypeUInt64())
v.reset(OpCopy)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, v.Type)
break
}
b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, fe.TypeUInt16())
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i - 1
break
}
b = mergePoint(b, x0, x1, x2)
- v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, fe.TypeUInt32())
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i - 2
}
return false
}
-func rewriteValueS390X_OpS390XORWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XORWconst(v *Value) bool {
// match: (ORWconst [c] x)
// cond: int32(c)==0
// result: x
}
return false
}
-func rewriteValueS390X_OpS390XORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XORconst(v *Value) bool {
// match: (ORconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueS390X_OpS390XSLD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSLD(v *Value) bool {
// match: (SLD x (MOVDconst [c]))
// cond:
// result: (SLDconst [c&63] x)
}
return false
}
-func rewriteValueS390X_OpS390XSLW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSLW(v *Value) bool {
// match: (SLW x (MOVDconst [c]))
// cond:
// result: (SLWconst [c&63] x)
}
return false
}
-func rewriteValueS390X_OpS390XSRAD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSRAD(v *Value) bool {
// match: (SRAD x (MOVDconst [c]))
// cond:
// result: (SRADconst [c&63] x)
}
return false
}
-func rewriteValueS390X_OpS390XSRADconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSRADconst(v *Value) bool {
// match: (SRADconst [c] (MOVDconst [d]))
// cond:
// result: (MOVDconst [d>>uint64(c)])
}
return false
}
-func rewriteValueS390X_OpS390XSRAW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSRAW(v *Value) bool {
// match: (SRAW x (MOVDconst [c]))
// cond:
// result: (SRAWconst [c&63] x)
}
return false
}
-func rewriteValueS390X_OpS390XSRAWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSRAWconst(v *Value) bool {
// match: (SRAWconst [c] (MOVDconst [d]))
// cond:
// result: (MOVDconst [d>>uint64(c)])
}
return false
}
-func rewriteValueS390X_OpS390XSRD(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSRD(v *Value) bool {
// match: (SRD x (MOVDconst [c]))
// cond:
// result: (SRDconst [c&63] x)
}
return false
}
-func rewriteValueS390X_OpS390XSRW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSRW(v *Value) bool {
// match: (SRW x (MOVDconst [c]))
// cond:
// result: (SRWconst [c&63] x)
}
return false
}
-func rewriteValueS390X_OpS390XSTM2(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSTM2(v *Value) bool {
// match: (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
// cond: x.Uses == 1 && is20Bit(i-8) && clobber(x)
// result: (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
}
return false
}
-func rewriteValueS390X_OpS390XSTMG2(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSTMG2(v *Value) bool {
// match: (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
// cond: x.Uses == 1 && is20Bit(i-16) && clobber(x)
// result: (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
}
return false
}
-func rewriteValueS390X_OpS390XSUB(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XSUB(v *Value) bool {
b := v.Block
_ = b
// match: (SUB x (MOVDconst [c]))
}
return false
}
-func rewriteValueS390X_OpS390XSUBEWcarrymask(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSUBEWcarrymask(v *Value) bool {
// match: (SUBEWcarrymask (FlagEQ))
// cond:
// result: (MOVDconst [-1])
}
return false
}
-func rewriteValueS390X_OpS390XSUBEcarrymask(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSUBEcarrymask(v *Value) bool {
// match: (SUBEcarrymask (FlagEQ))
// cond:
// result: (MOVDconst [-1])
}
return false
}
-func rewriteValueS390X_OpS390XSUBW(v *Value, config *Config) bool {
+func rewriteValueS390X_OpS390XSUBW(v *Value) bool {
b := v.Block
_ = b
// match: (SUBW x (MOVDconst [c]))
}
return false
}
-func rewriteValueS390X_OpS390XSUBWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSUBWconst(v *Value) bool {
// match: (SUBWconst [c] x)
// cond: int32(c) == 0
// result: x
return true
}
}
-func rewriteValueS390X_OpS390XSUBconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XSUBconst(v *Value) bool {
// match: (SUBconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueS390X_OpS390XXOR(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XXOR(v *Value) bool {
// match: (XOR x (MOVDconst [c]))
// cond: isU32Bit(c)
// result: (XORconst [c] x)
}
return false
}
-func rewriteValueS390X_OpS390XXORW(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XXORW(v *Value) bool {
// match: (XORW x (MOVDconst [c]))
// cond:
// result: (XORWconst [c] x)
}
return false
}
-func rewriteValueS390X_OpS390XXORWconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XXORWconst(v *Value) bool {
// match: (XORWconst [c] x)
// cond: int32(c)==0
// result: x
}
return false
}
-func rewriteValueS390X_OpS390XXORconst(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpS390XXORconst(v *Value) bool {
// match: (XORconst [0] x)
// cond:
// result: x
}
return false
}
-func rewriteValueS390X_OpSelect0(v *Value, config *Config) bool {
+func rewriteValueS390X_OpSelect0(v *Value) bool {
b := v.Block
_ = b
// match: (Select0 <t> (AddTupleFirst32 tuple val))
}
return false
}
-func rewriteValueS390X_OpSelect1(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSelect1(v *Value) bool {
// match: (Select1 (AddTupleFirst32 tuple _ ))
// cond:
// result: (Select1 tuple)
}
return false
}
-func rewriteValueS390X_OpSignExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSignExt16to32(v *Value) bool {
// match: (SignExt16to32 x)
// cond:
// result: (MOVHreg x)
return true
}
}
-func rewriteValueS390X_OpSignExt16to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSignExt16to64(v *Value) bool {
// match: (SignExt16to64 x)
// cond:
// result: (MOVHreg x)
return true
}
}
-func rewriteValueS390X_OpSignExt32to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSignExt32to64(v *Value) bool {
// match: (SignExt32to64 x)
// cond:
// result: (MOVWreg x)
return true
}
}
-func rewriteValueS390X_OpSignExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSignExt8to16(v *Value) bool {
// match: (SignExt8to16 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueS390X_OpSignExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSignExt8to32(v *Value) bool {
// match: (SignExt8to32 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueS390X_OpSignExt8to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSignExt8to64(v *Value) bool {
// match: (SignExt8to64 x)
// cond:
// result: (MOVBreg x)
return true
}
}
-func rewriteValueS390X_OpSlicemask(v *Value, config *Config) bool {
+func rewriteValueS390X_OpSlicemask(v *Value) bool {
b := v.Block
_ = b
// match: (Slicemask <t> x)
return true
}
}
-func rewriteValueS390X_OpSqrt(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSqrt(v *Value) bool {
// match: (Sqrt x)
// cond:
// result: (FSQRT x)
return true
}
}
-func rewriteValueS390X_OpStaticCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpStaticCall(v *Value) bool {
// match: (StaticCall [argwid] {target} mem)
// cond:
// result: (CALLstatic [argwid] {target} mem)
return true
}
}
-func rewriteValueS390X_OpStore(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpStore(v *Value) bool {
// match: (Store {t} ptr val mem)
// cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
}
return false
}
-func rewriteValueS390X_OpSub16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSub16(v *Value) bool {
// match: (Sub16 x y)
// cond:
// result: (SUBW x y)
return true
}
}
-func rewriteValueS390X_OpSub32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSub32(v *Value) bool {
// match: (Sub32 x y)
// cond:
// result: (SUBW x y)
return true
}
}
-func rewriteValueS390X_OpSub32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSub32F(v *Value) bool {
// match: (Sub32F x y)
// cond:
// result: (FSUBS x y)
return true
}
}
-func rewriteValueS390X_OpSub64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSub64(v *Value) bool {
// match: (Sub64 x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueS390X_OpSub64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSub64F(v *Value) bool {
// match: (Sub64F x y)
// cond:
// result: (FSUB x y)
return true
}
}
-func rewriteValueS390X_OpSub8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSub8(v *Value) bool {
// match: (Sub8 x y)
// cond:
// result: (SUBW x y)
return true
}
}
-func rewriteValueS390X_OpSubPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpSubPtr(v *Value) bool {
// match: (SubPtr x y)
// cond:
// result: (SUB x y)
return true
}
}
-func rewriteValueS390X_OpTrunc16to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpTrunc16to8(v *Value) bool {
// match: (Trunc16to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueS390X_OpTrunc32to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpTrunc32to16(v *Value) bool {
// match: (Trunc32to16 x)
// cond:
// result: x
return true
}
}
-func rewriteValueS390X_OpTrunc32to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpTrunc32to8(v *Value) bool {
// match: (Trunc32to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueS390X_OpTrunc64to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpTrunc64to16(v *Value) bool {
// match: (Trunc64to16 x)
// cond:
// result: x
return true
}
}
-func rewriteValueS390X_OpTrunc64to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpTrunc64to32(v *Value) bool {
// match: (Trunc64to32 x)
// cond:
// result: x
return true
}
}
-func rewriteValueS390X_OpTrunc64to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpTrunc64to8(v *Value) bool {
// match: (Trunc64to8 x)
// cond:
// result: x
return true
}
}
-func rewriteValueS390X_OpXor16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpXor16(v *Value) bool {
// match: (Xor16 x y)
// cond:
// result: (XORW x y)
return true
}
}
-func rewriteValueS390X_OpXor32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpXor32(v *Value) bool {
// match: (Xor32 x y)
// cond:
// result: (XORW x y)
return true
}
}
-func rewriteValueS390X_OpXor64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpXor64(v *Value) bool {
// match: (Xor64 x y)
// cond:
// result: (XOR x y)
return true
}
}
-func rewriteValueS390X_OpXor8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpXor8(v *Value) bool {
// match: (Xor8 x y)
// cond:
// result: (XORW x y)
return true
}
}
-func rewriteValueS390X_OpZero(v *Value, config *Config) bool {
+func rewriteValueS390X_OpZero(v *Value) bool {
b := v.Block
_ = b
// match: (Zero [0] _ mem)
}
return false
}
-func rewriteValueS390X_OpZeroExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpZeroExt16to32(v *Value) bool {
// match: (ZeroExt16to32 x)
// cond:
// result: (MOVHZreg x)
return true
}
}
-func rewriteValueS390X_OpZeroExt16to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpZeroExt16to64(v *Value) bool {
// match: (ZeroExt16to64 x)
// cond:
// result: (MOVHZreg x)
return true
}
}
-func rewriteValueS390X_OpZeroExt32to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpZeroExt32to64(v *Value) bool {
// match: (ZeroExt32to64 x)
// cond:
// result: (MOVWZreg x)
return true
}
}
-func rewriteValueS390X_OpZeroExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpZeroExt8to16(v *Value) bool {
// match: (ZeroExt8to16 x)
// cond:
// result: (MOVBZreg x)
return true
}
}
-func rewriteValueS390X_OpZeroExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpZeroExt8to32(v *Value) bool {
// match: (ZeroExt8to32 x)
// cond:
// result: (MOVBZreg x)
return true
}
}
-func rewriteValueS390X_OpZeroExt8to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValueS390X_OpZeroExt8to64(v *Value) bool {
// match: (ZeroExt8to64 x)
// cond:
// result: (MOVBZreg x)
return true
}
}
-func rewriteBlockS390X(b *Block, config *Config) bool {
+func rewriteBlockS390X(b *Block) bool {
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
switch b.Kind {
case BlockS390XEQ:
// match: (EQ (InvertFlags cmp) yes no)
}
// match: (If cond yes no)
// cond:
- // result: (NE (CMPWconst [0] (MOVBZreg <config.fe.TypeBool()> cond)) yes no)
+ // result: (NE (CMPWconst [0] (MOVBZreg <fe.TypeBool()> cond)) yes no)
for {
v := b.Control
_ = v
b.Kind = BlockS390XNE
v0 := b.NewValue0(v.Pos, OpS390XCMPWconst, TypeFlags)
v0.AuxInt = 0
- v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, config.fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, fe.TypeBool())
v1.AddArg(cond)
v0.AddArg(v1)
b.SetControl(v0)
import "math"
var _ = math.MinInt8 // in case not otherwise used
-func rewriteValuedec(v *Value, config *Config) bool {
+func rewriteValuedec(v *Value) bool {
switch v.Op {
case OpComplexImag:
- return rewriteValuedec_OpComplexImag(v, config)
+ return rewriteValuedec_OpComplexImag(v)
case OpComplexReal:
- return rewriteValuedec_OpComplexReal(v, config)
+ return rewriteValuedec_OpComplexReal(v)
case OpIData:
- return rewriteValuedec_OpIData(v, config)
+ return rewriteValuedec_OpIData(v)
case OpITab:
- return rewriteValuedec_OpITab(v, config)
+ return rewriteValuedec_OpITab(v)
case OpLoad:
- return rewriteValuedec_OpLoad(v, config)
+ return rewriteValuedec_OpLoad(v)
case OpSliceCap:
- return rewriteValuedec_OpSliceCap(v, config)
+ return rewriteValuedec_OpSliceCap(v)
case OpSliceLen:
- return rewriteValuedec_OpSliceLen(v, config)
+ return rewriteValuedec_OpSliceLen(v)
case OpSlicePtr:
- return rewriteValuedec_OpSlicePtr(v, config)
+ return rewriteValuedec_OpSlicePtr(v)
case OpStore:
- return rewriteValuedec_OpStore(v, config)
+ return rewriteValuedec_OpStore(v)
case OpStringLen:
- return rewriteValuedec_OpStringLen(v, config)
+ return rewriteValuedec_OpStringLen(v)
case OpStringPtr:
- return rewriteValuedec_OpStringPtr(v, config)
+ return rewriteValuedec_OpStringPtr(v)
}
return false
}
-func rewriteValuedec_OpComplexImag(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec_OpComplexImag(v *Value) bool {
// match: (ComplexImag (ComplexMake _ imag ))
// cond:
// result: imag
}
return false
}
-func rewriteValuedec_OpComplexReal(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec_OpComplexReal(v *Value) bool {
// match: (ComplexReal (ComplexMake real _ ))
// cond:
// result: real
}
return false
}
-func rewriteValuedec_OpIData(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec_OpIData(v *Value) bool {
// match: (IData (IMake _ data))
// cond:
// result: data
}
return false
}
-func rewriteValuedec_OpITab(v *Value, config *Config) bool {
+func rewriteValuedec_OpITab(v *Value) bool {
b := v.Block
_ = b
// match: (ITab (IMake itab _))
}
return false
}
-func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
+func rewriteValuedec_OpLoad(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Load <t> ptr mem)
// cond: t.IsComplex() && t.Size() == 8
- // result: (ComplexMake (Load <config.fe.TypeFloat32()> ptr mem) (Load <config.fe.TypeFloat32()> (OffPtr <config.fe.TypeFloat32().PtrTo()> [4] ptr) mem) )
+ // result: (ComplexMake (Load <fe.TypeFloat32()> ptr mem) (Load <fe.TypeFloat32()> (OffPtr <fe.TypeFloat32().PtrTo()> [4] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
break
}
v.reset(OpComplexMake)
- v0 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeFloat32())
+ v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeFloat32())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeFloat32())
- v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeFloat32().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeFloat32())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeFloat32().PtrTo())
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
}
// match: (Load <t> ptr mem)
// cond: t.IsComplex() && t.Size() == 16
- // result: (ComplexMake (Load <config.fe.TypeFloat64()> ptr mem) (Load <config.fe.TypeFloat64()> (OffPtr <config.fe.TypeFloat64().PtrTo()> [8] ptr) mem) )
+ // result: (ComplexMake (Load <fe.TypeFloat64()> ptr mem) (Load <fe.TypeFloat64()> (OffPtr <fe.TypeFloat64().PtrTo()> [8] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
break
}
v.reset(OpComplexMake)
- v0 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeFloat64())
+ v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeFloat64())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeFloat64())
- v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeFloat64().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeFloat64())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeFloat64().PtrTo())
v2.AuxInt = 8
v2.AddArg(ptr)
v1.AddArg(v2)
}
// match: (Load <t> ptr mem)
// cond: t.IsString()
- // result: (StringMake (Load <config.fe.TypeBytePtr()> ptr mem) (Load <config.fe.TypeInt()> (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem))
+ // result: (StringMake (Load <fe.TypeBytePtr()> ptr mem) (Load <fe.TypeInt()> (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
break
}
v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeBytePtr())
+ v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeBytePtr())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeInt())
- v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeInt())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
}
// match: (Load <t> ptr mem)
// cond: t.IsSlice()
- // result: (SliceMake (Load <t.ElemType().PtrTo()> ptr mem) (Load <config.fe.TypeInt()> (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem) (Load <config.fe.TypeInt()> (OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr) mem))
+ // result: (SliceMake (Load <t.ElemType().PtrTo()> ptr mem) (Load <fe.TypeInt()> (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem) (Load <fe.TypeInt()> (OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeInt())
- v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeInt())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeInt())
- v4 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt().PtrTo())
+ v3 := b.NewValue0(v.Pos, OpLoad, fe.TypeInt())
+ v4 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
v4.AuxInt = 2 * config.PtrSize
v4.AddArg(ptr)
v3.AddArg(v4)
}
// match: (Load <t> ptr mem)
// cond: t.IsInterface()
- // result: (IMake (Load <config.fe.TypeBytePtr()> ptr mem) (Load <config.fe.TypeBytePtr()> (OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr) mem))
+ // result: (IMake (Load <fe.TypeBytePtr()> ptr mem) (Load <fe.TypeBytePtr()> (OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
break
}
v.reset(OpIMake)
- v0 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeBytePtr())
+ v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeBytePtr())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeBytePtr())
- v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeBytePtr().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeBytePtr())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeBytePtr().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
}
return false
}
-func rewriteValuedec_OpSliceCap(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec_OpSliceCap(v *Value) bool {
// match: (SliceCap (SliceMake _ _ cap))
// cond:
// result: cap
}
return false
}
-func rewriteValuedec_OpSliceLen(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec_OpSliceLen(v *Value) bool {
// match: (SliceLen (SliceMake _ len _))
// cond:
// result: len
}
return false
}
-func rewriteValuedec_OpSlicePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec_OpSlicePtr(v *Value) bool {
// match: (SlicePtr (SliceMake ptr _ _ ))
// cond:
// result: ptr
}
return false
}
-func rewriteValuedec_OpStore(v *Value, config *Config) bool {
+func rewriteValuedec_OpStore(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Store {t} dst (ComplexMake real imag) mem)
// cond: t.(Type).Size() == 8
- // result: (Store {config.fe.TypeFloat32()} (OffPtr <config.fe.TypeFloat32().PtrTo()> [4] dst) imag (Store {config.fe.TypeFloat32()} dst real mem))
+ // result: (Store {fe.TypeFloat32()} (OffPtr <fe.TypeFloat32().PtrTo()> [4] dst) imag (Store {fe.TypeFloat32()} dst real mem))
for {
t := v.Aux
dst := v.Args[0]
break
}
v.reset(OpStore)
- v.Aux = config.fe.TypeFloat32()
- v0 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeFloat32().PtrTo())
+ v.Aux = fe.TypeFloat32()
+ v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeFloat32().PtrTo())
v0.AuxInt = 4
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(imag)
v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
- v1.Aux = config.fe.TypeFloat32()
+ v1.Aux = fe.TypeFloat32()
v1.AddArg(dst)
v1.AddArg(real)
v1.AddArg(mem)
}
// match: (Store {t} dst (ComplexMake real imag) mem)
// cond: t.(Type).Size() == 16
- // result: (Store {config.fe.TypeFloat64()} (OffPtr <config.fe.TypeFloat64().PtrTo()> [8] dst) imag (Store {config.fe.TypeFloat64()} dst real mem))
+ // result: (Store {fe.TypeFloat64()} (OffPtr <fe.TypeFloat64().PtrTo()> [8] dst) imag (Store {fe.TypeFloat64()} dst real mem))
for {
t := v.Aux
dst := v.Args[0]
break
}
v.reset(OpStore)
- v.Aux = config.fe.TypeFloat64()
- v0 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeFloat64().PtrTo())
+ v.Aux = fe.TypeFloat64()
+ v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeFloat64().PtrTo())
v0.AuxInt = 8
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(imag)
v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
- v1.Aux = config.fe.TypeFloat64()
+ v1.Aux = fe.TypeFloat64()
v1.AddArg(dst)
v1.AddArg(real)
v1.AddArg(mem)
}
// match: (Store dst (StringMake ptr len) mem)
// cond:
- // result: (Store {config.fe.TypeInt()} (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store {config.fe.TypeBytePtr()} dst ptr mem))
+ // result: (Store {fe.TypeInt()} (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store {fe.TypeBytePtr()} dst ptr mem))
for {
dst := v.Args[0]
v_1 := v.Args[1]
len := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
- v.Aux = config.fe.TypeInt()
- v0 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt().PtrTo())
+ v.Aux = fe.TypeInt()
+ v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(len)
v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
- v1.Aux = config.fe.TypeBytePtr()
+ v1.Aux = fe.TypeBytePtr()
v1.AddArg(dst)
v1.AddArg(ptr)
v1.AddArg(mem)
}
// match: (Store dst (SliceMake ptr len cap) mem)
// cond:
- // result: (Store {config.fe.TypeInt()} (OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] dst) cap (Store {config.fe.TypeInt()} (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store {config.fe.TypeBytePtr()} dst ptr mem)))
+ // result: (Store {fe.TypeInt()} (OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] dst) cap (Store {fe.TypeInt()} (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store {fe.TypeBytePtr()} dst ptr mem)))
for {
dst := v.Args[0]
v_1 := v.Args[1]
cap := v_1.Args[2]
mem := v.Args[2]
v.reset(OpStore)
- v.Aux = config.fe.TypeInt()
- v0 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt().PtrTo())
+ v.Aux = fe.TypeInt()
+ v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
v0.AuxInt = 2 * config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(cap)
v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
- v1.Aux = config.fe.TypeInt()
- v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt().PtrTo())
+ v1.Aux = fe.TypeInt()
+ v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(dst)
v1.AddArg(v2)
v1.AddArg(len)
v3 := b.NewValue0(v.Pos, OpStore, TypeMem)
- v3.Aux = config.fe.TypeBytePtr()
+ v3.Aux = fe.TypeBytePtr()
v3.AddArg(dst)
v3.AddArg(ptr)
v3.AddArg(mem)
}
// match: (Store dst (IMake itab data) mem)
// cond:
- // result: (Store {config.fe.TypeBytePtr()} (OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst) data (Store {config.fe.TypeUintptr()} dst itab mem))
+ // result: (Store {fe.TypeBytePtr()} (OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst) data (Store {fe.TypeUintptr()} dst itab mem))
for {
dst := v.Args[0]
v_1 := v.Args[1]
data := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
- v.Aux = config.fe.TypeBytePtr()
- v0 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeBytePtr().PtrTo())
+ v.Aux = fe.TypeBytePtr()
+ v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeBytePtr().PtrTo())
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(data)
v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
- v1.Aux = config.fe.TypeUintptr()
+ v1.Aux = fe.TypeUintptr()
v1.AddArg(dst)
v1.AddArg(itab)
v1.AddArg(mem)
}
return false
}
-func rewriteValuedec_OpStringLen(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec_OpStringLen(v *Value) bool {
// match: (StringLen (StringMake _ len))
// cond:
// result: len
}
return false
}
-func rewriteValuedec_OpStringPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec_OpStringPtr(v *Value) bool {
// match: (StringPtr (StringMake ptr _))
// cond:
// result: ptr
}
return false
}
-func rewriteBlockdec(b *Block, config *Config) bool {
+func rewriteBlockdec(b *Block) bool {
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
switch b.Kind {
}
return false
import "math"
var _ = math.MinInt8 // in case not otherwise used
-func rewriteValuedec64(v *Value, config *Config) bool {
+func rewriteValuedec64(v *Value) bool {
switch v.Op {
case OpAdd64:
- return rewriteValuedec64_OpAdd64(v, config)
+ return rewriteValuedec64_OpAdd64(v)
case OpAnd64:
- return rewriteValuedec64_OpAnd64(v, config)
+ return rewriteValuedec64_OpAnd64(v)
case OpArg:
- return rewriteValuedec64_OpArg(v, config)
+ return rewriteValuedec64_OpArg(v)
case OpBitLen64:
- return rewriteValuedec64_OpBitLen64(v, config)
+ return rewriteValuedec64_OpBitLen64(v)
case OpBswap64:
- return rewriteValuedec64_OpBswap64(v, config)
+ return rewriteValuedec64_OpBswap64(v)
case OpCom64:
- return rewriteValuedec64_OpCom64(v, config)
+ return rewriteValuedec64_OpCom64(v)
case OpConst64:
- return rewriteValuedec64_OpConst64(v, config)
+ return rewriteValuedec64_OpConst64(v)
case OpCtz64:
- return rewriteValuedec64_OpCtz64(v, config)
+ return rewriteValuedec64_OpCtz64(v)
case OpEq64:
- return rewriteValuedec64_OpEq64(v, config)
+ return rewriteValuedec64_OpEq64(v)
case OpGeq64:
- return rewriteValuedec64_OpGeq64(v, config)
+ return rewriteValuedec64_OpGeq64(v)
case OpGeq64U:
- return rewriteValuedec64_OpGeq64U(v, config)
+ return rewriteValuedec64_OpGeq64U(v)
case OpGreater64:
- return rewriteValuedec64_OpGreater64(v, config)
+ return rewriteValuedec64_OpGreater64(v)
case OpGreater64U:
- return rewriteValuedec64_OpGreater64U(v, config)
+ return rewriteValuedec64_OpGreater64U(v)
case OpInt64Hi:
- return rewriteValuedec64_OpInt64Hi(v, config)
+ return rewriteValuedec64_OpInt64Hi(v)
case OpInt64Lo:
- return rewriteValuedec64_OpInt64Lo(v, config)
+ return rewriteValuedec64_OpInt64Lo(v)
case OpLeq64:
- return rewriteValuedec64_OpLeq64(v, config)
+ return rewriteValuedec64_OpLeq64(v)
case OpLeq64U:
- return rewriteValuedec64_OpLeq64U(v, config)
+ return rewriteValuedec64_OpLeq64U(v)
case OpLess64:
- return rewriteValuedec64_OpLess64(v, config)
+ return rewriteValuedec64_OpLess64(v)
case OpLess64U:
- return rewriteValuedec64_OpLess64U(v, config)
+ return rewriteValuedec64_OpLess64U(v)
case OpLoad:
- return rewriteValuedec64_OpLoad(v, config)
+ return rewriteValuedec64_OpLoad(v)
case OpLsh16x64:
- return rewriteValuedec64_OpLsh16x64(v, config)
+ return rewriteValuedec64_OpLsh16x64(v)
case OpLsh32x64:
- return rewriteValuedec64_OpLsh32x64(v, config)
+ return rewriteValuedec64_OpLsh32x64(v)
case OpLsh64x16:
- return rewriteValuedec64_OpLsh64x16(v, config)
+ return rewriteValuedec64_OpLsh64x16(v)
case OpLsh64x32:
- return rewriteValuedec64_OpLsh64x32(v, config)
+ return rewriteValuedec64_OpLsh64x32(v)
case OpLsh64x64:
- return rewriteValuedec64_OpLsh64x64(v, config)
+ return rewriteValuedec64_OpLsh64x64(v)
case OpLsh64x8:
- return rewriteValuedec64_OpLsh64x8(v, config)
+ return rewriteValuedec64_OpLsh64x8(v)
case OpLsh8x64:
- return rewriteValuedec64_OpLsh8x64(v, config)
+ return rewriteValuedec64_OpLsh8x64(v)
case OpMul64:
- return rewriteValuedec64_OpMul64(v, config)
+ return rewriteValuedec64_OpMul64(v)
case OpNeg64:
- return rewriteValuedec64_OpNeg64(v, config)
+ return rewriteValuedec64_OpNeg64(v)
case OpNeq64:
- return rewriteValuedec64_OpNeq64(v, config)
+ return rewriteValuedec64_OpNeq64(v)
case OpOr64:
- return rewriteValuedec64_OpOr64(v, config)
+ return rewriteValuedec64_OpOr64(v)
case OpRsh16Ux64:
- return rewriteValuedec64_OpRsh16Ux64(v, config)
+ return rewriteValuedec64_OpRsh16Ux64(v)
case OpRsh16x64:
- return rewriteValuedec64_OpRsh16x64(v, config)
+ return rewriteValuedec64_OpRsh16x64(v)
case OpRsh32Ux64:
- return rewriteValuedec64_OpRsh32Ux64(v, config)
+ return rewriteValuedec64_OpRsh32Ux64(v)
case OpRsh32x64:
- return rewriteValuedec64_OpRsh32x64(v, config)
+ return rewriteValuedec64_OpRsh32x64(v)
case OpRsh64Ux16:
- return rewriteValuedec64_OpRsh64Ux16(v, config)
+ return rewriteValuedec64_OpRsh64Ux16(v)
case OpRsh64Ux32:
- return rewriteValuedec64_OpRsh64Ux32(v, config)
+ return rewriteValuedec64_OpRsh64Ux32(v)
case OpRsh64Ux64:
- return rewriteValuedec64_OpRsh64Ux64(v, config)
+ return rewriteValuedec64_OpRsh64Ux64(v)
case OpRsh64Ux8:
- return rewriteValuedec64_OpRsh64Ux8(v, config)
+ return rewriteValuedec64_OpRsh64Ux8(v)
case OpRsh64x16:
- return rewriteValuedec64_OpRsh64x16(v, config)
+ return rewriteValuedec64_OpRsh64x16(v)
case OpRsh64x32:
- return rewriteValuedec64_OpRsh64x32(v, config)
+ return rewriteValuedec64_OpRsh64x32(v)
case OpRsh64x64:
- return rewriteValuedec64_OpRsh64x64(v, config)
+ return rewriteValuedec64_OpRsh64x64(v)
case OpRsh64x8:
- return rewriteValuedec64_OpRsh64x8(v, config)
+ return rewriteValuedec64_OpRsh64x8(v)
case OpRsh8Ux64:
- return rewriteValuedec64_OpRsh8Ux64(v, config)
+ return rewriteValuedec64_OpRsh8Ux64(v)
case OpRsh8x64:
- return rewriteValuedec64_OpRsh8x64(v, config)
+ return rewriteValuedec64_OpRsh8x64(v)
case OpSignExt16to64:
- return rewriteValuedec64_OpSignExt16to64(v, config)
+ return rewriteValuedec64_OpSignExt16to64(v)
case OpSignExt32to64:
- return rewriteValuedec64_OpSignExt32to64(v, config)
+ return rewriteValuedec64_OpSignExt32to64(v)
case OpSignExt8to64:
- return rewriteValuedec64_OpSignExt8to64(v, config)
+ return rewriteValuedec64_OpSignExt8to64(v)
case OpStore:
- return rewriteValuedec64_OpStore(v, config)
+ return rewriteValuedec64_OpStore(v)
case OpSub64:
- return rewriteValuedec64_OpSub64(v, config)
+ return rewriteValuedec64_OpSub64(v)
case OpTrunc64to16:
- return rewriteValuedec64_OpTrunc64to16(v, config)
+ return rewriteValuedec64_OpTrunc64to16(v)
case OpTrunc64to32:
- return rewriteValuedec64_OpTrunc64to32(v, config)
+ return rewriteValuedec64_OpTrunc64to32(v)
case OpTrunc64to8:
- return rewriteValuedec64_OpTrunc64to8(v, config)
+ return rewriteValuedec64_OpTrunc64to8(v)
case OpXor64:
- return rewriteValuedec64_OpXor64(v, config)
+ return rewriteValuedec64_OpXor64(v)
case OpZeroExt16to64:
- return rewriteValuedec64_OpZeroExt16to64(v, config)
+ return rewriteValuedec64_OpZeroExt16to64(v)
case OpZeroExt32to64:
- return rewriteValuedec64_OpZeroExt32to64(v, config)
+ return rewriteValuedec64_OpZeroExt32to64(v)
case OpZeroExt8to64:
- return rewriteValuedec64_OpZeroExt8to64(v, config)
+ return rewriteValuedec64_OpZeroExt8to64(v)
}
return false
}
-func rewriteValuedec64_OpAdd64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpAdd64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Add64 x y)
// cond:
- // result: (Int64Make (Add32withcarry <config.fe.TypeInt32()> (Int64Hi x) (Int64Hi y) (Select1 <TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y)))) (Select0 <config.fe.TypeUInt32()> (Add32carry (Int64Lo x) (Int64Lo y))))
+ // result: (Int64Make (Add32withcarry <fe.TypeInt32()> (Int64Hi x) (Int64Hi y) (Select1 <TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y)))) (Select0 <fe.TypeUInt32()> (Add32carry (Int64Lo x) (Int64Lo y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpAdd32withcarry, config.fe.TypeInt32())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpAdd32withcarry, fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags)
- v4 := b.NewValue0(v.Pos, OpAdd32carry, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
- v5 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpAdd32carry, MakeTuple(fe.TypeUInt32(), TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
v0.AddArg(v3)
v.AddArg(v0)
- v7 := b.NewValue0(v.Pos, OpSelect0, config.fe.TypeUInt32())
- v8 := b.NewValue0(v.Pos, OpAdd32carry, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
- v9 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpSelect0, fe.TypeUInt32())
+ v8 := b.NewValue0(v.Pos, OpAdd32carry, MakeTuple(fe.TypeUInt32(), TypeFlags))
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v9.AddArg(x)
v8.AddArg(v9)
- v10 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v10 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v10.AddArg(y)
v8.AddArg(v10)
v7.AddArg(v8)
return true
}
}
-func rewriteValuedec64_OpAnd64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpAnd64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (And64 x y)
// cond:
- // result: (Int64Make (And32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y)) (And32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+ // result: (Int64Make (And32 <fe.TypeUInt32()> (Int64Hi x) (Int64Hi y)) (And32 <fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpAnd32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpAnd32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAnd32, config.fe.TypeUInt32())
- v4 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpAnd32, fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v5.AddArg(y)
v3.AddArg(v5)
v.AddArg(v3)
return true
}
}
-func rewriteValuedec64_OpArg(v *Value, config *Config) bool {
+func rewriteValuedec64_OpArg(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Arg {n} [off])
// cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned()
- // result: (Int64Make (Arg <config.fe.TypeInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off]))
+ // result: (Int64Make (Arg <fe.TypeInt32()> {n} [off+4]) (Arg <fe.TypeUInt32()> {n} [off]))
for {
off := v.AuxInt
n := v.Aux
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpArg, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpArg, fe.TypeInt32())
v0.AuxInt = off + 4
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpArg, fe.TypeUInt32())
v1.AuxInt = off
v1.Aux = n
v.AddArg(v1)
}
// match: (Arg {n} [off])
// cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned()
- // result: (Int64Make (Arg <config.fe.TypeUInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off]))
+ // result: (Int64Make (Arg <fe.TypeUInt32()> {n} [off+4]) (Arg <fe.TypeUInt32()> {n} [off]))
for {
off := v.AuxInt
n := v.Aux
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpArg, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpArg, fe.TypeUInt32())
v0.AuxInt = off + 4
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpArg, fe.TypeUInt32())
v1.AuxInt = off
v1.Aux = n
v.AddArg(v1)
}
// match: (Arg {n} [off])
// cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned()
- // result: (Int64Make (Arg <config.fe.TypeInt32()> {n} [off]) (Arg <config.fe.TypeUInt32()> {n} [off+4]))
+ // result: (Int64Make (Arg <fe.TypeInt32()> {n} [off]) (Arg <fe.TypeUInt32()> {n} [off+4]))
for {
off := v.AuxInt
n := v.Aux
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpArg, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpArg, fe.TypeInt32())
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpArg, fe.TypeUInt32())
v1.AuxInt = off + 4
v1.Aux = n
v.AddArg(v1)
}
// match: (Arg {n} [off])
// cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned()
- // result: (Int64Make (Arg <config.fe.TypeUInt32()> {n} [off]) (Arg <config.fe.TypeUInt32()> {n} [off+4]))
+ // result: (Int64Make (Arg <fe.TypeUInt32()> {n} [off]) (Arg <fe.TypeUInt32()> {n} [off+4]))
for {
off := v.AuxInt
n := v.Aux
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpArg, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpArg, fe.TypeUInt32())
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpArg, fe.TypeUInt32())
v1.AuxInt = off + 4
v1.Aux = n
v.AddArg(v1)
}
return false
}
-func rewriteValuedec64_OpBitLen64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpBitLen64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (BitLen64 x)
// cond:
- // result: (Add32 <config.fe.TypeInt()> (BitLen32 <config.fe.TypeInt()> (Int64Hi x)) (BitLen32 <config.fe.TypeInt()> (Or32 <config.fe.TypeUInt32()> (Int64Lo x) (Zeromask (Int64Hi x)))))
+ // result: (Add32 <fe.TypeInt()> (BitLen32 <fe.TypeInt()> (Int64Hi x)) (BitLen32 <fe.TypeInt()> (Or32 <fe.TypeUInt32()> (Int64Lo x) (Zeromask (Int64Hi x)))))
for {
x := v.Args[0]
v.reset(OpAdd32)
- v.Type = config.fe.TypeInt()
- v0 := b.NewValue0(v.Pos, OpBitLen32, config.fe.TypeInt())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v.Type = fe.TypeInt()
+ v0 := b.NewValue0(v.Pos, OpBitLen32, fe.TypeInt())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpBitLen32, config.fe.TypeInt())
- v3 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v4 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpBitLen32, fe.TypeInt())
+ v3 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
- v6 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v6.AddArg(x)
v5.AddArg(v6)
v3.AddArg(v5)
return true
}
}
-func rewriteValuedec64_OpBswap64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpBswap64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Bswap64 x)
// cond:
- // result: (Int64Make (Bswap32 <config.fe.TypeUInt32()> (Int64Lo x)) (Bswap32 <config.fe.TypeUInt32()> (Int64Hi x)))
+ // result: (Int64Make (Bswap32 <fe.TypeUInt32()> (Int64Lo x)) (Bswap32 <fe.TypeUInt32()> (Int64Hi x)))
for {
x := v.Args[0]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpBswap32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpBswap32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpBswap32, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpBswap32, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v3.AddArg(x)
v2.AddArg(v3)
v.AddArg(v2)
return true
}
}
-func rewriteValuedec64_OpCom64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpCom64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Com64 x)
// cond:
- // result: (Int64Make (Com32 <config.fe.TypeUInt32()> (Int64Hi x)) (Com32 <config.fe.TypeUInt32()> (Int64Lo x)))
+ // result: (Int64Make (Com32 <fe.TypeUInt32()> (Int64Hi x)) (Com32 <fe.TypeUInt32()> (Int64Lo x)))
for {
x := v.Args[0]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpCom32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpCom32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpCom32, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpCom32, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v3.AddArg(x)
v2.AddArg(v3)
v.AddArg(v2)
return true
}
}
-func rewriteValuedec64_OpConst64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpConst64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Const64 <t> [c])
// cond: t.IsSigned()
- // result: (Int64Make (Const32 <config.fe.TypeInt32()> [c>>32]) (Const32 <config.fe.TypeUInt32()> [int64(int32(c))]))
+ // result: (Int64Make (Const32 <fe.TypeInt32()> [c>>32]) (Const32 <fe.TypeUInt32()> [int64(int32(c))]))
for {
t := v.Type
c := v.AuxInt
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpConst32, fe.TypeInt32())
v0.AuxInt = c >> 32
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v1.AuxInt = int64(int32(c))
v.AddArg(v1)
return true
}
// match: (Const64 <t> [c])
// cond: !t.IsSigned()
- // result: (Int64Make (Const32 <config.fe.TypeUInt32()> [c>>32]) (Const32 <config.fe.TypeUInt32()> [int64(int32(c))]))
+ // result: (Int64Make (Const32 <fe.TypeUInt32()> [c>>32]) (Const32 <fe.TypeUInt32()> [int64(int32(c))]))
for {
t := v.Type
c := v.AuxInt
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v0.AuxInt = c >> 32
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v1.AuxInt = int64(int32(c))
v.AddArg(v1)
return true
}
return false
}
-func rewriteValuedec64_OpCtz64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpCtz64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Ctz64 x)
// cond:
- // result: (Add32 <config.fe.TypeUInt32()> (Ctz32 <config.fe.TypeUInt32()> (Int64Lo x)) (And32 <config.fe.TypeUInt32()> (Com32 <config.fe.TypeUInt32()> (Zeromask (Int64Lo x))) (Ctz32 <config.fe.TypeUInt32()> (Int64Hi x))))
+ // result: (Add32 <fe.TypeUInt32()> (Ctz32 <fe.TypeUInt32()> (Int64Lo x)) (And32 <fe.TypeUInt32()> (Com32 <fe.TypeUInt32()> (Zeromask (Int64Lo x))) (Ctz32 <fe.TypeUInt32()> (Int64Hi x))))
for {
x := v.Args[0]
v.reset(OpAdd32)
- v.Type = config.fe.TypeUInt32()
- v0 := b.NewValue0(v.Pos, OpCtz32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v.Type = fe.TypeUInt32()
+ v0 := b.NewValue0(v.Pos, OpCtz32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpAnd32, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpCom32, config.fe.TypeUInt32())
- v4 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
- v5 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpAnd32, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpCom32, fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v5.AddArg(x)
v4.AddArg(v5)
v3.AddArg(v4)
v2.AddArg(v3)
- v6 := b.NewValue0(v.Pos, OpCtz32, config.fe.TypeUInt32())
- v7 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpCtz32, fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v7.AddArg(x)
v6.AddArg(v7)
v2.AddArg(v6)
return true
}
}
-func rewriteValuedec64_OpEq64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpEq64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Eq64 x y)
// cond:
// result: (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Eq32 (Int64Lo x) (Int64Lo y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpAndB)
- v0 := b.NewValue0(v.Pos, OpEq32, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpEq32, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpEq32, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpEq32, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v5.AddArg(y)
v3.AddArg(v5)
v.AddArg(v3)
return true
}
}
-func rewriteValuedec64_OpGeq64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpGeq64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq64 x y)
// cond:
// result: (OrB (Greater32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Geq32U (Int64Lo x) (Int64Lo y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpGreater32, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpGreater32, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpEq32, config.fe.TypeBool())
- v5 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpAndB, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpEq32, fe.TypeBool())
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpGeq32U, config.fe.TypeBool())
- v8 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpGeq32U, fe.TypeBool())
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
return true
}
}
-func rewriteValuedec64_OpGeq64U(v *Value, config *Config) bool {
+func rewriteValuedec64_OpGeq64U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Geq64U x y)
// cond:
// result: (OrB (Greater32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Geq32U (Int64Lo x) (Int64Lo y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpGreater32U, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpGreater32U, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpEq32, config.fe.TypeBool())
- v5 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpAndB, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpEq32, fe.TypeBool())
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpGeq32U, config.fe.TypeBool())
- v8 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpGeq32U, fe.TypeBool())
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
return true
}
}
-func rewriteValuedec64_OpGreater64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpGreater64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater64 x y)
// cond:
// result: (OrB (Greater32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Greater32U (Int64Lo x) (Int64Lo y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpGreater32, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpGreater32, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpEq32, config.fe.TypeBool())
- v5 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpAndB, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpEq32, fe.TypeBool())
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpGreater32U, config.fe.TypeBool())
- v8 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpGreater32U, fe.TypeBool())
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
return true
}
}
-func rewriteValuedec64_OpGreater64U(v *Value, config *Config) bool {
+func rewriteValuedec64_OpGreater64U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Greater64U x y)
// cond:
// result: (OrB (Greater32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Greater32U (Int64Lo x) (Int64Lo y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpGreater32U, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpGreater32U, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpEq32, config.fe.TypeBool())
- v5 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpAndB, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpEq32, fe.TypeBool())
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpGreater32U, config.fe.TypeBool())
- v8 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpGreater32U, fe.TypeBool())
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
return true
}
}
-func rewriteValuedec64_OpInt64Hi(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec64_OpInt64Hi(v *Value) bool {
// match: (Int64Hi (Int64Make hi _))
// cond:
// result: hi
}
return false
}
-func rewriteValuedec64_OpInt64Lo(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec64_OpInt64Lo(v *Value) bool {
// match: (Int64Lo (Int64Make _ lo))
// cond:
// result: lo
}
return false
}
-func rewriteValuedec64_OpLeq64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpLeq64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq64 x y)
// cond:
// result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpLess32, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpLess32, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpEq32, config.fe.TypeBool())
- v5 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpAndB, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpEq32, fe.TypeBool())
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpLeq32U, config.fe.TypeBool())
- v8 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpLeq32U, fe.TypeBool())
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
return true
}
}
-func rewriteValuedec64_OpLeq64U(v *Value, config *Config) bool {
+func rewriteValuedec64_OpLeq64U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Leq64U x y)
// cond:
// result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpLess32U, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpLess32U, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpEq32, config.fe.TypeBool())
- v5 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpAndB, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpEq32, fe.TypeBool())
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpLeq32U, config.fe.TypeBool())
- v8 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpLeq32U, fe.TypeBool())
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
return true
}
}
-func rewriteValuedec64_OpLess64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpLess64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less64 x y)
// cond:
// result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpLess32, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpLess32, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpEq32, config.fe.TypeBool())
- v5 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpAndB, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpEq32, fe.TypeBool())
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpLess32U, config.fe.TypeBool())
- v8 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpLess32U, fe.TypeBool())
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
return true
}
}
-func rewriteValuedec64_OpLess64U(v *Value, config *Config) bool {
+func rewriteValuedec64_OpLess64U(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Less64U x y)
// cond:
// result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y))))
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpLess32U, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpLess32U, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpAndB, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpEq32, config.fe.TypeBool())
- v5 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpAndB, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpEq32, fe.TypeBool())
+ v5 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
- v7 := b.NewValue0(v.Pos, OpLess32U, config.fe.TypeBool())
- v8 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpLess32U, fe.TypeBool())
+ v8 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v8.AddArg(x)
v7.AddArg(v8)
- v9 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v9.AddArg(y)
v7.AddArg(v9)
v3.AddArg(v7)
return true
}
}
-func rewriteValuedec64_OpLoad(v *Value, config *Config) bool {
+func rewriteValuedec64_OpLoad(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && !config.BigEndian && t.IsSigned()
- // result: (Int64Make (Load <config.fe.TypeInt32()> (OffPtr <config.fe.TypeInt32().PtrTo()> [4] ptr) mem) (Load <config.fe.TypeUInt32()> ptr mem))
+ // result: (Int64Make (Load <fe.TypeInt32()> (OffPtr <fe.TypeInt32().PtrTo()> [4] ptr) mem) (Load <fe.TypeUInt32()> ptr mem))
for {
t := v.Type
ptr := v.Args[0]
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeInt32())
- v1 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeInt32().PtrTo())
+ v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt32().PtrTo())
v1.AuxInt = 4
v1.AddArg(ptr)
v0.AddArg(v1)
v0.AddArg(mem)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpLoad, fe.TypeUInt32())
v2.AddArg(ptr)
v2.AddArg(mem)
v.AddArg(v2)
}
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && !config.BigEndian && !t.IsSigned()
- // result: (Int64Make (Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem) (Load <config.fe.TypeUInt32()> ptr mem))
+ // result: (Int64Make (Load <fe.TypeUInt32()> (OffPtr <fe.TypeUInt32().PtrTo()> [4] ptr) mem) (Load <fe.TypeUInt32()> ptr mem))
for {
t := v.Type
ptr := v.Args[0]
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeUInt32().PtrTo())
+ v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeUInt32().PtrTo())
v1.AuxInt = 4
v1.AddArg(ptr)
v0.AddArg(v1)
v0.AddArg(mem)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpLoad, fe.TypeUInt32())
v2.AddArg(ptr)
v2.AddArg(mem)
v.AddArg(v2)
}
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && config.BigEndian && t.IsSigned()
- // result: (Int64Make (Load <config.fe.TypeInt32()> ptr mem) (Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem))
+ // result: (Int64Make (Load <fe.TypeInt32()> ptr mem) (Load <fe.TypeUInt32()> (OffPtr <fe.TypeUInt32().PtrTo()> [4] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeInt32())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeUInt32().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeUInt32().PtrTo())
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
}
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && config.BigEndian && !t.IsSigned()
- // result: (Int64Make (Load <config.fe.TypeUInt32()> ptr mem) (Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem))
+ // result: (Int64Make (Load <fe.TypeUInt32()> ptr mem) (Load <fe.TypeUInt32()> (OffPtr <fe.TypeUInt32().PtrTo()> [4] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeUInt32())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpLoad, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpOffPtr, config.fe.TypeUInt32().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeUInt32().PtrTo())
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
}
return false
}
-func rewriteValuedec64_OpLsh16x64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpLsh16x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const32 [0])
}
// match: (Lsh16x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Lsh16x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ // result: (Lsh16x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpLsh16x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
}
return false
}
-func rewriteValuedec64_OpLsh32x64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpLsh32x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const32 [0])
}
// match: (Lsh32x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Lsh32x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ // result: (Lsh32x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpLsh32x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
}
return false
}
-func rewriteValuedec64_OpLsh64x16(v *Value, config *Config) bool {
+func rewriteValuedec64_OpLsh64x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x16 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Lsh32x16 <config.fe.TypeUInt32()> hi s) (Rsh32Ux16 <config.fe.TypeUInt32()> lo (Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s))) (Lsh32x16 <config.fe.TypeUInt32()> lo (Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32])))) (Lsh32x16 <config.fe.TypeUInt32()> lo s))
+ // result: (Int64Make (Or32 <fe.TypeUInt32()> (Or32 <fe.TypeUInt32()> (Lsh32x16 <fe.TypeUInt32()> hi s) (Rsh32Ux16 <fe.TypeUInt32()> lo (Sub16 <fe.TypeUInt16()> (Const16 <fe.TypeUInt16()> [32]) s))) (Lsh32x16 <fe.TypeUInt32()> lo (Sub16 <fe.TypeUInt16()> s (Const16 <fe.TypeUInt16()> [32])))) (Lsh32x16 <fe.TypeUInt32()> lo s))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpLsh32x16, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpLsh32x16, fe.TypeUInt32())
v2.AddArg(hi)
v2.AddArg(s)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux16, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux16, fe.TypeUInt32())
v3.AddArg(lo)
- v4 := b.NewValue0(v.Pos, OpSub16, config.fe.TypeUInt16())
- v5 := b.NewValue0(v.Pos, OpConst16, config.fe.TypeUInt16())
+ v4 := b.NewValue0(v.Pos, OpSub16, fe.TypeUInt16())
+ v5 := b.NewValue0(v.Pos, OpConst16, fe.TypeUInt16())
v5.AuxInt = 32
v4.AddArg(v5)
v4.AddArg(s)
v3.AddArg(v4)
v1.AddArg(v3)
v0.AddArg(v1)
- v6 := b.NewValue0(v.Pos, OpLsh32x16, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpLsh32x16, fe.TypeUInt32())
v6.AddArg(lo)
- v7 := b.NewValue0(v.Pos, OpSub16, config.fe.TypeUInt16())
+ v7 := b.NewValue0(v.Pos, OpSub16, fe.TypeUInt16())
v7.AddArg(s)
- v8 := b.NewValue0(v.Pos, OpConst16, config.fe.TypeUInt16())
+ v8 := b.NewValue0(v.Pos, OpConst16, fe.TypeUInt16())
v8.AuxInt = 32
v7.AddArg(v8)
v6.AddArg(v7)
v0.AddArg(v6)
v.AddArg(v0)
- v9 := b.NewValue0(v.Pos, OpLsh32x16, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpLsh32x16, fe.TypeUInt32())
v9.AddArg(lo)
v9.AddArg(s)
v.AddArg(v9)
}
return false
}
-func rewriteValuedec64_OpLsh64x32(v *Value, config *Config) bool {
+func rewriteValuedec64_OpLsh64x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x32 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Lsh32x32 <config.fe.TypeUInt32()> hi s) (Rsh32Ux32 <config.fe.TypeUInt32()> lo (Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s))) (Lsh32x32 <config.fe.TypeUInt32()> lo (Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32])))) (Lsh32x32 <config.fe.TypeUInt32()> lo s))
+ // result: (Int64Make (Or32 <fe.TypeUInt32()> (Or32 <fe.TypeUInt32()> (Lsh32x32 <fe.TypeUInt32()> hi s) (Rsh32Ux32 <fe.TypeUInt32()> lo (Sub32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [32]) s))) (Lsh32x32 <fe.TypeUInt32()> lo (Sub32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [32])))) (Lsh32x32 <fe.TypeUInt32()> lo s))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpLsh32x32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpLsh32x32, fe.TypeUInt32())
v2.AddArg(hi)
v2.AddArg(s)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux32, fe.TypeUInt32())
v3.AddArg(lo)
- v4 := b.NewValue0(v.Pos, OpSub32, config.fe.TypeUInt32())
- v5 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpSub32, fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v5.AuxInt = 32
v4.AddArg(v5)
v4.AddArg(s)
v3.AddArg(v4)
v1.AddArg(v3)
v0.AddArg(v1)
- v6 := b.NewValue0(v.Pos, OpLsh32x32, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpLsh32x32, fe.TypeUInt32())
v6.AddArg(lo)
- v7 := b.NewValue0(v.Pos, OpSub32, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpSub32, fe.TypeUInt32())
v7.AddArg(s)
- v8 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v8 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v8.AuxInt = 32
v7.AddArg(v8)
v6.AddArg(v7)
v0.AddArg(v6)
v.AddArg(v0)
- v9 := b.NewValue0(v.Pos, OpLsh32x32, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpLsh32x32, fe.TypeUInt32())
v9.AddArg(lo)
v9.AddArg(s)
v.AddArg(v9)
}
return false
}
-func rewriteValuedec64_OpLsh64x64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpLsh64x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const64 [0])
}
// match: (Lsh64x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Lsh64x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ // result: (Lsh64x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpLsh64x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
}
return false
}
-func rewriteValuedec64_OpLsh64x8(v *Value, config *Config) bool {
+func rewriteValuedec64_OpLsh64x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x8 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Lsh32x8 <config.fe.TypeUInt32()> hi s) (Rsh32Ux8 <config.fe.TypeUInt32()> lo (Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s))) (Lsh32x8 <config.fe.TypeUInt32()> lo (Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32])))) (Lsh32x8 <config.fe.TypeUInt32()> lo s))
+ // result: (Int64Make (Or32 <fe.TypeUInt32()> (Or32 <fe.TypeUInt32()> (Lsh32x8 <fe.TypeUInt32()> hi s) (Rsh32Ux8 <fe.TypeUInt32()> lo (Sub8 <fe.TypeUInt8()> (Const8 <fe.TypeUInt8()> [32]) s))) (Lsh32x8 <fe.TypeUInt32()> lo (Sub8 <fe.TypeUInt8()> s (Const8 <fe.TypeUInt8()> [32])))) (Lsh32x8 <fe.TypeUInt32()> lo s))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpLsh32x8, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpLsh32x8, fe.TypeUInt32())
v2.AddArg(hi)
v2.AddArg(s)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux8, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux8, fe.TypeUInt32())
v3.AddArg(lo)
- v4 := b.NewValue0(v.Pos, OpSub8, config.fe.TypeUInt8())
- v5 := b.NewValue0(v.Pos, OpConst8, config.fe.TypeUInt8())
+ v4 := b.NewValue0(v.Pos, OpSub8, fe.TypeUInt8())
+ v5 := b.NewValue0(v.Pos, OpConst8, fe.TypeUInt8())
v5.AuxInt = 32
v4.AddArg(v5)
v4.AddArg(s)
v3.AddArg(v4)
v1.AddArg(v3)
v0.AddArg(v1)
- v6 := b.NewValue0(v.Pos, OpLsh32x8, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpLsh32x8, fe.TypeUInt32())
v6.AddArg(lo)
- v7 := b.NewValue0(v.Pos, OpSub8, config.fe.TypeUInt8())
+ v7 := b.NewValue0(v.Pos, OpSub8, fe.TypeUInt8())
v7.AddArg(s)
- v8 := b.NewValue0(v.Pos, OpConst8, config.fe.TypeUInt8())
+ v8 := b.NewValue0(v.Pos, OpConst8, fe.TypeUInt8())
v8.AuxInt = 32
v7.AddArg(v8)
v6.AddArg(v7)
v0.AddArg(v6)
v.AddArg(v0)
- v9 := b.NewValue0(v.Pos, OpLsh32x8, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpLsh32x8, fe.TypeUInt32())
v9.AddArg(lo)
v9.AddArg(s)
v.AddArg(v9)
}
return false
}
-func rewriteValuedec64_OpLsh8x64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpLsh8x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const32 [0])
}
// match: (Lsh8x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Lsh8x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ // result: (Lsh8x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpLsh8x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
}
return false
}
-func rewriteValuedec64_OpMul64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpMul64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mul64 x y)
// cond:
- // result: (Int64Make (Add32 <config.fe.TypeUInt32()> (Mul32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Hi y)) (Add32 <config.fe.TypeUInt32()> (Mul32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Lo y)) (Select0 <config.fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))) (Select1 <config.fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
+ // result: (Int64Make (Add32 <fe.TypeUInt32()> (Mul32 <fe.TypeUInt32()> (Int64Lo x) (Int64Hi y)) (Add32 <fe.TypeUInt32()> (Mul32 <fe.TypeUInt32()> (Int64Hi x) (Int64Lo y)) (Select0 <fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))) (Select1 <fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpAdd32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpMul32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpAdd32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMul32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v2.AddArg(x)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v3.AddArg(y)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpAdd32, config.fe.TypeUInt32())
- v5 := b.NewValue0(v.Pos, OpMul32, config.fe.TypeUInt32())
- v6 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpAdd32, fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpMul32, fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v6.AddArg(x)
v5.AddArg(v6)
- v7 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v7.AddArg(y)
v5.AddArg(v7)
v4.AddArg(v5)
- v8 := b.NewValue0(v.Pos, OpSelect0, config.fe.TypeUInt32())
- v9 := b.NewValue0(v.Pos, OpMul32uhilo, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
- v10 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v8 := b.NewValue0(v.Pos, OpSelect0, fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpMul32uhilo, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
+ v10 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v10.AddArg(x)
v9.AddArg(v10)
- v11 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v11 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v11.AddArg(y)
v9.AddArg(v11)
v8.AddArg(v9)
v4.AddArg(v8)
v0.AddArg(v4)
v.AddArg(v0)
- v12 := b.NewValue0(v.Pos, OpSelect1, config.fe.TypeUInt32())
- v13 := b.NewValue0(v.Pos, OpMul32uhilo, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
- v14 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v12 := b.NewValue0(v.Pos, OpSelect1, fe.TypeUInt32())
+ v13 := b.NewValue0(v.Pos, OpMul32uhilo, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
+ v14 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v14.AddArg(x)
v13.AddArg(v14)
- v15 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v15 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v15.AddArg(y)
v13.AddArg(v15)
v12.AddArg(v13)
return true
}
}
-func rewriteValuedec64_OpNeg64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpNeg64(v *Value) bool {
b := v.Block
_ = b
// match: (Neg64 <t> x)
return true
}
}
-func rewriteValuedec64_OpNeq64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpNeq64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Neq64 x y)
// cond:
// result: (OrB (Neq32 (Int64Hi x) (Int64Hi y)) (Neq32 (Int64Lo x) (Int64Lo y)))
x := v.Args[0]
y := v.Args[1]
v.reset(OpOrB)
- v0 := b.NewValue0(v.Pos, OpNeq32, config.fe.TypeBool())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpNeq32, fe.TypeBool())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpNeq32, config.fe.TypeBool())
- v4 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpNeq32, fe.TypeBool())
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v5.AddArg(y)
v3.AddArg(v5)
v.AddArg(v3)
return true
}
}
-func rewriteValuedec64_OpOr64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpOr64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Or64 x y)
// cond:
- // result: (Int64Make (Or32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y)) (Or32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+ // result: (Int64Make (Or32 <fe.TypeUInt32()> (Int64Hi x) (Int64Hi y)) (Or32 <fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v4 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v5.AddArg(y)
v3.AddArg(v5)
v.AddArg(v3)
return true
}
}
-func rewriteValuedec64_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh16Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const32 [0])
}
// match: (Rsh16Ux64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh16Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ // result: (Rsh16Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpRsh16Ux32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
}
return false
}
-func rewriteValuedec64_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh16x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x64 x (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Signmask (SignExt16to32 x))
break
}
v.reset(OpSignmask)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh16x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh16x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ // result: (Rsh16x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpRsh16x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
}
return false
}
-func rewriteValuedec64_OpRsh32Ux64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh32Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const32 [0])
}
// match: (Rsh32Ux64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh32Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ // result: (Rsh32Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpRsh32Ux32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
}
return false
}
-func rewriteValuedec64_OpRsh32x64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh32x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x64 x (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Signmask x)
}
// match: (Rsh32x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh32x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ // result: (Rsh32x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpRsh32x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
}
return false
}
-func rewriteValuedec64_OpRsh64Ux16(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh64Ux16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux16 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Rsh32Ux16 <config.fe.TypeUInt32()> hi s) (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Rsh32Ux16 <config.fe.TypeUInt32()> lo s) (Lsh32x16 <config.fe.TypeUInt32()> hi (Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s))) (Rsh32Ux16 <config.fe.TypeUInt32()> hi (Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32])))))
+ // result: (Int64Make (Rsh32Ux16 <fe.TypeUInt32()> hi s) (Or32 <fe.TypeUInt32()> (Or32 <fe.TypeUInt32()> (Rsh32Ux16 <fe.TypeUInt32()> lo s) (Lsh32x16 <fe.TypeUInt32()> hi (Sub16 <fe.TypeUInt16()> (Const16 <fe.TypeUInt16()> [32]) s))) (Rsh32Ux16 <fe.TypeUInt32()> hi (Sub16 <fe.TypeUInt16()> s (Const16 <fe.TypeUInt16()> [32])))))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux16, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux16, fe.TypeUInt32())
v0.AddArg(hi)
v0.AddArg(s)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpRsh32Ux16, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux16, fe.TypeUInt32())
v3.AddArg(lo)
v3.AddArg(s)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpLsh32x16, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpLsh32x16, fe.TypeUInt32())
v4.AddArg(hi)
- v5 := b.NewValue0(v.Pos, OpSub16, config.fe.TypeUInt16())
- v6 := b.NewValue0(v.Pos, OpConst16, config.fe.TypeUInt16())
+ v5 := b.NewValue0(v.Pos, OpSub16, fe.TypeUInt16())
+ v6 := b.NewValue0(v.Pos, OpConst16, fe.TypeUInt16())
v6.AuxInt = 32
v5.AddArg(v6)
v5.AddArg(s)
v4.AddArg(v5)
v2.AddArg(v4)
v1.AddArg(v2)
- v7 := b.NewValue0(v.Pos, OpRsh32Ux16, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpRsh32Ux16, fe.TypeUInt32())
v7.AddArg(hi)
- v8 := b.NewValue0(v.Pos, OpSub16, config.fe.TypeUInt16())
+ v8 := b.NewValue0(v.Pos, OpSub16, fe.TypeUInt16())
v8.AddArg(s)
- v9 := b.NewValue0(v.Pos, OpConst16, config.fe.TypeUInt16())
+ v9 := b.NewValue0(v.Pos, OpConst16, fe.TypeUInt16())
v9.AuxInt = 32
v8.AddArg(v9)
v7.AddArg(v8)
}
return false
}
-func rewriteValuedec64_OpRsh64Ux32(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh64Ux32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux32 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Rsh32Ux32 <config.fe.TypeUInt32()> hi s) (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Rsh32Ux32 <config.fe.TypeUInt32()> lo s) (Lsh32x32 <config.fe.TypeUInt32()> hi (Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s))) (Rsh32Ux32 <config.fe.TypeUInt32()> hi (Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32])))))
+ // result: (Int64Make (Rsh32Ux32 <fe.TypeUInt32()> hi s) (Or32 <fe.TypeUInt32()> (Or32 <fe.TypeUInt32()> (Rsh32Ux32 <fe.TypeUInt32()> lo s) (Lsh32x32 <fe.TypeUInt32()> hi (Sub32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [32]) s))) (Rsh32Ux32 <fe.TypeUInt32()> hi (Sub32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [32])))))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux32, fe.TypeUInt32())
v0.AddArg(hi)
v0.AddArg(s)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpRsh32Ux32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux32, fe.TypeUInt32())
v3.AddArg(lo)
v3.AddArg(s)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpLsh32x32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpLsh32x32, fe.TypeUInt32())
v4.AddArg(hi)
- v5 := b.NewValue0(v.Pos, OpSub32, config.fe.TypeUInt32())
- v6 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpSub32, fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v6.AuxInt = 32
v5.AddArg(v6)
v5.AddArg(s)
v4.AddArg(v5)
v2.AddArg(v4)
v1.AddArg(v2)
- v7 := b.NewValue0(v.Pos, OpRsh32Ux32, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpRsh32Ux32, fe.TypeUInt32())
v7.AddArg(hi)
- v8 := b.NewValue0(v.Pos, OpSub32, config.fe.TypeUInt32())
+ v8 := b.NewValue0(v.Pos, OpSub32, fe.TypeUInt32())
v8.AddArg(s)
- v9 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v9.AuxInt = 32
v8.AddArg(v9)
v7.AddArg(v8)
}
return false
}
-func rewriteValuedec64_OpRsh64Ux64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh64Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const64 [0])
}
// match: (Rsh64Ux64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh64Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ // result: (Rsh64Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpRsh64Ux32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
}
return false
}
-func rewriteValuedec64_OpRsh64Ux8(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh64Ux8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux8 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Rsh32Ux8 <config.fe.TypeUInt32()> hi s) (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Rsh32Ux8 <config.fe.TypeUInt32()> lo s) (Lsh32x8 <config.fe.TypeUInt32()> hi (Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s))) (Rsh32Ux8 <config.fe.TypeUInt32()> hi (Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32])))))
+ // result: (Int64Make (Rsh32Ux8 <fe.TypeUInt32()> hi s) (Or32 <fe.TypeUInt32()> (Or32 <fe.TypeUInt32()> (Rsh32Ux8 <fe.TypeUInt32()> lo s) (Lsh32x8 <fe.TypeUInt32()> hi (Sub8 <fe.TypeUInt8()> (Const8 <fe.TypeUInt8()> [32]) s))) (Rsh32Ux8 <fe.TypeUInt32()> hi (Sub8 <fe.TypeUInt8()> s (Const8 <fe.TypeUInt8()> [32])))))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux8, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux8, fe.TypeUInt32())
v0.AddArg(hi)
v0.AddArg(s)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpRsh32Ux8, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux8, fe.TypeUInt32())
v3.AddArg(lo)
v3.AddArg(s)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpLsh32x8, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpLsh32x8, fe.TypeUInt32())
v4.AddArg(hi)
- v5 := b.NewValue0(v.Pos, OpSub8, config.fe.TypeUInt8())
- v6 := b.NewValue0(v.Pos, OpConst8, config.fe.TypeUInt8())
+ v5 := b.NewValue0(v.Pos, OpSub8, fe.TypeUInt8())
+ v6 := b.NewValue0(v.Pos, OpConst8, fe.TypeUInt8())
v6.AuxInt = 32
v5.AddArg(v6)
v5.AddArg(s)
v4.AddArg(v5)
v2.AddArg(v4)
v1.AddArg(v2)
- v7 := b.NewValue0(v.Pos, OpRsh32Ux8, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpRsh32Ux8, fe.TypeUInt32())
v7.AddArg(hi)
- v8 := b.NewValue0(v.Pos, OpSub8, config.fe.TypeUInt8())
+ v8 := b.NewValue0(v.Pos, OpSub8, fe.TypeUInt8())
v8.AddArg(s)
- v9 := b.NewValue0(v.Pos, OpConst8, config.fe.TypeUInt8())
+ v9 := b.NewValue0(v.Pos, OpConst8, fe.TypeUInt8())
v9.AuxInt = 32
v8.AddArg(v9)
v7.AddArg(v8)
}
return false
}
-func rewriteValuedec64_OpRsh64x16(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh64x16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x16 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Rsh32x16 <config.fe.TypeUInt32()> hi s) (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Rsh32Ux16 <config.fe.TypeUInt32()> lo s) (Lsh32x16 <config.fe.TypeUInt32()> hi (Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s))) (And32 <config.fe.TypeUInt32()> (Rsh32x16 <config.fe.TypeUInt32()> hi (Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32]))) (Zeromask (ZeroExt16to32 (Rsh16Ux32 <config.fe.TypeUInt16()> s (Const32 <config.fe.TypeUInt32()> [5])))))))
+ // result: (Int64Make (Rsh32x16 <fe.TypeUInt32()> hi s) (Or32 <fe.TypeUInt32()> (Or32 <fe.TypeUInt32()> (Rsh32Ux16 <fe.TypeUInt32()> lo s) (Lsh32x16 <fe.TypeUInt32()> hi (Sub16 <fe.TypeUInt16()> (Const16 <fe.TypeUInt16()> [32]) s))) (And32 <fe.TypeUInt32()> (Rsh32x16 <fe.TypeUInt32()> hi (Sub16 <fe.TypeUInt16()> s (Const16 <fe.TypeUInt16()> [32]))) (Zeromask (ZeroExt16to32 (Rsh16Ux32 <fe.TypeUInt16()> s (Const32 <fe.TypeUInt32()> [5])))))))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpRsh32x16, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpRsh32x16, fe.TypeUInt32())
v0.AddArg(hi)
v0.AddArg(s)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpRsh32Ux16, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux16, fe.TypeUInt32())
v3.AddArg(lo)
v3.AddArg(s)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpLsh32x16, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpLsh32x16, fe.TypeUInt32())
v4.AddArg(hi)
- v5 := b.NewValue0(v.Pos, OpSub16, config.fe.TypeUInt16())
- v6 := b.NewValue0(v.Pos, OpConst16, config.fe.TypeUInt16())
+ v5 := b.NewValue0(v.Pos, OpSub16, fe.TypeUInt16())
+ v6 := b.NewValue0(v.Pos, OpConst16, fe.TypeUInt16())
v6.AuxInt = 32
v5.AddArg(v6)
v5.AddArg(s)
v4.AddArg(v5)
v2.AddArg(v4)
v1.AddArg(v2)
- v7 := b.NewValue0(v.Pos, OpAnd32, config.fe.TypeUInt32())
- v8 := b.NewValue0(v.Pos, OpRsh32x16, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpAnd32, fe.TypeUInt32())
+ v8 := b.NewValue0(v.Pos, OpRsh32x16, fe.TypeUInt32())
v8.AddArg(hi)
- v9 := b.NewValue0(v.Pos, OpSub16, config.fe.TypeUInt16())
+ v9 := b.NewValue0(v.Pos, OpSub16, fe.TypeUInt16())
v9.AddArg(s)
- v10 := b.NewValue0(v.Pos, OpConst16, config.fe.TypeUInt16())
+ v10 := b.NewValue0(v.Pos, OpConst16, fe.TypeUInt16())
v10.AuxInt = 32
v9.AddArg(v10)
v8.AddArg(v9)
v7.AddArg(v8)
- v11 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
- v12 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
- v13 := b.NewValue0(v.Pos, OpRsh16Ux32, config.fe.TypeUInt16())
+ v11 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
+ v12 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
+ v13 := b.NewValue0(v.Pos, OpRsh16Ux32, fe.TypeUInt16())
v13.AddArg(s)
- v14 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v14 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v14.AuxInt = 5
v13.AddArg(v14)
v12.AddArg(v13)
}
return false
}
-func rewriteValuedec64_OpRsh64x32(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh64x32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x32 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Rsh32x32 <config.fe.TypeUInt32()> hi s) (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Rsh32Ux32 <config.fe.TypeUInt32()> lo s) (Lsh32x32 <config.fe.TypeUInt32()> hi (Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s))) (And32 <config.fe.TypeUInt32()> (Rsh32x32 <config.fe.TypeUInt32()> hi (Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32]))) (Zeromask (Rsh32Ux32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [5]))))))
+ // result: (Int64Make (Rsh32x32 <fe.TypeUInt32()> hi s) (Or32 <fe.TypeUInt32()> (Or32 <fe.TypeUInt32()> (Rsh32Ux32 <fe.TypeUInt32()> lo s) (Lsh32x32 <fe.TypeUInt32()> hi (Sub32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [32]) s))) (And32 <fe.TypeUInt32()> (Rsh32x32 <fe.TypeUInt32()> hi (Sub32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [32]))) (Zeromask (Rsh32Ux32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [5]))))))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpRsh32x32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpRsh32x32, fe.TypeUInt32())
v0.AddArg(hi)
v0.AddArg(s)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpRsh32Ux32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux32, fe.TypeUInt32())
v3.AddArg(lo)
v3.AddArg(s)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpLsh32x32, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpLsh32x32, fe.TypeUInt32())
v4.AddArg(hi)
- v5 := b.NewValue0(v.Pos, OpSub32, config.fe.TypeUInt32())
- v6 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpSub32, fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v6.AuxInt = 32
v5.AddArg(v6)
v5.AddArg(s)
v4.AddArg(v5)
v2.AddArg(v4)
v1.AddArg(v2)
- v7 := b.NewValue0(v.Pos, OpAnd32, config.fe.TypeUInt32())
- v8 := b.NewValue0(v.Pos, OpRsh32x32, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpAnd32, fe.TypeUInt32())
+ v8 := b.NewValue0(v.Pos, OpRsh32x32, fe.TypeUInt32())
v8.AddArg(hi)
- v9 := b.NewValue0(v.Pos, OpSub32, config.fe.TypeUInt32())
+ v9 := b.NewValue0(v.Pos, OpSub32, fe.TypeUInt32())
v9.AddArg(s)
- v10 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v10 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v10.AuxInt = 32
v9.AddArg(v10)
v8.AddArg(v9)
v7.AddArg(v8)
- v11 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
- v12 := b.NewValue0(v.Pos, OpRsh32Ux32, config.fe.TypeUInt32())
+ v11 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
+ v12 := b.NewValue0(v.Pos, OpRsh32Ux32, fe.TypeUInt32())
v12.AddArg(s)
- v13 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v13 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v13.AuxInt = 5
v12.AddArg(v13)
v11.AddArg(v12)
}
return false
}
-func rewriteValuedec64_OpRsh64x64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh64x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x64 x (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x)))
break
}
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
- v3 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v3.AddArg(x)
v2.AddArg(v3)
v.AddArg(v2)
}
// match: (Rsh64x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh64x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ // result: (Rsh64x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpRsh64x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
}
return false
}
-func rewriteValuedec64_OpRsh64x8(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh64x8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x8 (Int64Make hi lo) s)
// cond:
- // result: (Int64Make (Rsh32x8 <config.fe.TypeUInt32()> hi s) (Or32 <config.fe.TypeUInt32()> (Or32 <config.fe.TypeUInt32()> (Rsh32Ux8 <config.fe.TypeUInt32()> lo s) (Lsh32x8 <config.fe.TypeUInt32()> hi (Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s))) (And32 <config.fe.TypeUInt32()> (Rsh32x8 <config.fe.TypeUInt32()> hi (Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32]))) (Zeromask (ZeroExt8to32 (Rsh8Ux32 <config.fe.TypeUInt8()> s (Const32 <config.fe.TypeUInt32()> [5])))))))
+ // result: (Int64Make (Rsh32x8 <fe.TypeUInt32()> hi s) (Or32 <fe.TypeUInt32()> (Or32 <fe.TypeUInt32()> (Rsh32Ux8 <fe.TypeUInt32()> lo s) (Lsh32x8 <fe.TypeUInt32()> hi (Sub8 <fe.TypeUInt8()> (Const8 <fe.TypeUInt8()> [32]) s))) (And32 <fe.TypeUInt32()> (Rsh32x8 <fe.TypeUInt32()> hi (Sub8 <fe.TypeUInt8()> s (Const8 <fe.TypeUInt8()> [32]))) (Zeromask (ZeroExt8to32 (Rsh8Ux32 <fe.TypeUInt8()> s (Const32 <fe.TypeUInt32()> [5])))))))
for {
v_0 := v.Args[0]
if v_0.Op != OpInt64Make {
lo := v_0.Args[1]
s := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpRsh32x8, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpRsh32x8, fe.TypeUInt32())
v0.AddArg(hi)
v0.AddArg(s)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpRsh32Ux8, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux8, fe.TypeUInt32())
v3.AddArg(lo)
v3.AddArg(s)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpLsh32x8, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpLsh32x8, fe.TypeUInt32())
v4.AddArg(hi)
- v5 := b.NewValue0(v.Pos, OpSub8, config.fe.TypeUInt8())
- v6 := b.NewValue0(v.Pos, OpConst8, config.fe.TypeUInt8())
+ v5 := b.NewValue0(v.Pos, OpSub8, fe.TypeUInt8())
+ v6 := b.NewValue0(v.Pos, OpConst8, fe.TypeUInt8())
v6.AuxInt = 32
v5.AddArg(v6)
v5.AddArg(s)
v4.AddArg(v5)
v2.AddArg(v4)
v1.AddArg(v2)
- v7 := b.NewValue0(v.Pos, OpAnd32, config.fe.TypeUInt32())
- v8 := b.NewValue0(v.Pos, OpRsh32x8, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpAnd32, fe.TypeUInt32())
+ v8 := b.NewValue0(v.Pos, OpRsh32x8, fe.TypeUInt32())
v8.AddArg(hi)
- v9 := b.NewValue0(v.Pos, OpSub8, config.fe.TypeUInt8())
+ v9 := b.NewValue0(v.Pos, OpSub8, fe.TypeUInt8())
v9.AddArg(s)
- v10 := b.NewValue0(v.Pos, OpConst8, config.fe.TypeUInt8())
+ v10 := b.NewValue0(v.Pos, OpConst8, fe.TypeUInt8())
v10.AuxInt = 32
v9.AddArg(v10)
v8.AddArg(v9)
v7.AddArg(v8)
- v11 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
- v12 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
- v13 := b.NewValue0(v.Pos, OpRsh8Ux32, config.fe.TypeUInt8())
+ v11 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
+ v12 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
+ v13 := b.NewValue0(v.Pos, OpRsh8Ux32, fe.TypeUInt8())
v13.AddArg(s)
- v14 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v14 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v14.AuxInt = 5
v13.AddArg(v14)
v12.AddArg(v13)
}
return false
}
-func rewriteValuedec64_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh8Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux64 _ (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Const32 [0])
}
// match: (Rsh8Ux64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh8Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ // result: (Rsh8Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpRsh8Ux32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
}
return false
}
-func rewriteValuedec64_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpRsh8x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8x64 x (Int64Make (Const32 [c]) _))
// cond: c != 0
// result: (Signmask (SignExt8to32 x))
break
}
v.reset(OpSignmask)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh8x64 x (Int64Make hi lo))
// cond: hi.Op != OpConst32
- // result: (Rsh8x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
+ // result: (Rsh8x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
for {
x := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpRsh8x32)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpOr32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpZeromask, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpOr32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpZeromask, fe.TypeUInt32())
v1.AddArg(hi)
v0.AddArg(v1)
v0.AddArg(lo)
}
return false
}
-func rewriteValuedec64_OpSignExt16to64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpSignExt16to64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (SignExt16to64 x)
// cond:
// result: (SignExt32to64 (SignExt16to32 x))
for {
x := v.Args[0]
v.reset(OpSignExt32to64)
- v0 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValuedec64_OpSignExt32to64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpSignExt32to64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (SignExt32to64 x)
// cond:
// result: (Int64Make (Signmask x) x)
for {
x := v.Args[0]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpSignmask, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignmask, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
v.AddArg(x)
return true
}
}
-func rewriteValuedec64_OpSignExt8to64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpSignExt8to64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (SignExt8to64 x)
// cond:
// result: (SignExt32to64 (SignExt8to32 x))
for {
x := v.Args[0]
v.reset(OpSignExt32to64)
- v0 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValuedec64_OpStore(v *Value, config *Config) bool {
+func rewriteValuedec64_OpStore(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (Store {t} dst (Int64Make hi lo) mem)
// cond: t.(Type).Size() == 8 && !config.BigEndian
// result: (Store {hi.Type} (OffPtr <hi.Type.PtrTo()> [4] dst) hi (Store {lo.Type} dst lo mem))
}
return false
}
-func rewriteValuedec64_OpSub64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpSub64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Sub64 x y)
// cond:
- // result: (Int64Make (Sub32withcarry <config.fe.TypeInt32()> (Int64Hi x) (Int64Hi y) (Select1 <TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y)))) (Select0 <config.fe.TypeUInt32()> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ // result: (Int64Make (Sub32withcarry <fe.TypeInt32()> (Int64Hi x) (Int64Hi y) (Select1 <TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y)))) (Select0 <fe.TypeUInt32()> (Sub32carry (Int64Lo x) (Int64Lo y))))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpSub32withcarry, config.fe.TypeInt32())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpSub32withcarry, fe.TypeInt32())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags)
- v4 := b.NewValue0(v.Pos, OpSub32carry, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
- v5 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpSub32carry, MakeTuple(fe.TypeUInt32(), TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v5.AddArg(x)
v4.AddArg(v5)
- v6 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v6.AddArg(y)
v4.AddArg(v6)
v3.AddArg(v4)
v0.AddArg(v3)
v.AddArg(v0)
- v7 := b.NewValue0(v.Pos, OpSelect0, config.fe.TypeUInt32())
- v8 := b.NewValue0(v.Pos, OpSub32carry, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
- v9 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpSelect0, fe.TypeUInt32())
+ v8 := b.NewValue0(v.Pos, OpSub32carry, MakeTuple(fe.TypeUInt32(), TypeFlags))
+ v9 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v9.AddArg(x)
v8.AddArg(v9)
- v10 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v10 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v10.AddArg(y)
v8.AddArg(v10)
v7.AddArg(v8)
return true
}
}
-func rewriteValuedec64_OpTrunc64to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec64_OpTrunc64to16(v *Value) bool {
// match: (Trunc64to16 (Int64Make _ lo))
// cond:
// result: (Trunc32to16 lo)
}
return false
}
-func rewriteValuedec64_OpTrunc64to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec64_OpTrunc64to32(v *Value) bool {
// match: (Trunc64to32 (Int64Make _ lo))
// cond:
// result: lo
}
return false
}
-func rewriteValuedec64_OpTrunc64to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuedec64_OpTrunc64to8(v *Value) bool {
// match: (Trunc64to8 (Int64Make _ lo))
// cond:
// result: (Trunc32to8 lo)
}
return false
}
-func rewriteValuedec64_OpXor64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpXor64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Xor64 x y)
// cond:
- // result: (Int64Make (Xor32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y)) (Xor32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+ // result: (Int64Make (Xor32 <fe.TypeUInt32()> (Int64Hi x) (Int64Hi y)) (Xor32 <fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
for {
x := v.Args[0]
y := v.Args[1]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpXor32, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpXor32, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v1.AddArg(x)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpInt64Hi, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, fe.TypeUInt32())
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpXor32, config.fe.TypeUInt32())
- v4 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpXor32, fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpInt64Lo, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, fe.TypeUInt32())
v5.AddArg(y)
v3.AddArg(v5)
v.AddArg(v3)
return true
}
}
-func rewriteValuedec64_OpZeroExt16to64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpZeroExt16to64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (ZeroExt16to64 x)
// cond:
// result: (ZeroExt32to64 (ZeroExt16to32 x))
for {
x := v.Args[0]
v.reset(OpZeroExt32to64)
- v0 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteValuedec64_OpZeroExt32to64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpZeroExt32to64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (ZeroExt32to64 x)
// cond:
- // result: (Int64Make (Const32 <config.fe.TypeUInt32()> [0]) x)
+ // result: (Int64Make (Const32 <fe.TypeUInt32()> [0]) x)
for {
x := v.Args[0]
v.reset(OpInt64Make)
- v0 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(x)
return true
}
}
-func rewriteValuedec64_OpZeroExt8to64(v *Value, config *Config) bool {
+func rewriteValuedec64_OpZeroExt8to64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (ZeroExt8to64 x)
// cond:
// result: (ZeroExt32to64 (ZeroExt8to32 x))
for {
x := v.Args[0]
v.reset(OpZeroExt32to64)
- v0 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
}
-func rewriteBlockdec64(b *Block, config *Config) bool {
+func rewriteBlockdec64(b *Block) bool {
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
switch b.Kind {
}
return false
import "math"
var _ = math.MinInt8 // in case not otherwise used
-func rewriteValuegeneric(v *Value, config *Config) bool {
+func rewriteValuegeneric(v *Value) bool {
switch v.Op {
case OpAdd16:
- return rewriteValuegeneric_OpAdd16(v, config)
+ return rewriteValuegeneric_OpAdd16(v)
case OpAdd32:
- return rewriteValuegeneric_OpAdd32(v, config)
+ return rewriteValuegeneric_OpAdd32(v)
case OpAdd32F:
- return rewriteValuegeneric_OpAdd32F(v, config)
+ return rewriteValuegeneric_OpAdd32F(v)
case OpAdd64:
- return rewriteValuegeneric_OpAdd64(v, config)
+ return rewriteValuegeneric_OpAdd64(v)
case OpAdd64F:
- return rewriteValuegeneric_OpAdd64F(v, config)
+ return rewriteValuegeneric_OpAdd64F(v)
case OpAdd8:
- return rewriteValuegeneric_OpAdd8(v, config)
+ return rewriteValuegeneric_OpAdd8(v)
case OpAddPtr:
- return rewriteValuegeneric_OpAddPtr(v, config)
+ return rewriteValuegeneric_OpAddPtr(v)
case OpAnd16:
- return rewriteValuegeneric_OpAnd16(v, config)
+ return rewriteValuegeneric_OpAnd16(v)
case OpAnd32:
- return rewriteValuegeneric_OpAnd32(v, config)
+ return rewriteValuegeneric_OpAnd32(v)
case OpAnd64:
- return rewriteValuegeneric_OpAnd64(v, config)
+ return rewriteValuegeneric_OpAnd64(v)
case OpAnd8:
- return rewriteValuegeneric_OpAnd8(v, config)
+ return rewriteValuegeneric_OpAnd8(v)
case OpArg:
- return rewriteValuegeneric_OpArg(v, config)
+ return rewriteValuegeneric_OpArg(v)
case OpArraySelect:
- return rewriteValuegeneric_OpArraySelect(v, config)
+ return rewriteValuegeneric_OpArraySelect(v)
case OpCom16:
- return rewriteValuegeneric_OpCom16(v, config)
+ return rewriteValuegeneric_OpCom16(v)
case OpCom32:
- return rewriteValuegeneric_OpCom32(v, config)
+ return rewriteValuegeneric_OpCom32(v)
case OpCom64:
- return rewriteValuegeneric_OpCom64(v, config)
+ return rewriteValuegeneric_OpCom64(v)
case OpCom8:
- return rewriteValuegeneric_OpCom8(v, config)
+ return rewriteValuegeneric_OpCom8(v)
case OpConstInterface:
- return rewriteValuegeneric_OpConstInterface(v, config)
+ return rewriteValuegeneric_OpConstInterface(v)
case OpConstSlice:
- return rewriteValuegeneric_OpConstSlice(v, config)
+ return rewriteValuegeneric_OpConstSlice(v)
case OpConstString:
- return rewriteValuegeneric_OpConstString(v, config)
+ return rewriteValuegeneric_OpConstString(v)
case OpConvert:
- return rewriteValuegeneric_OpConvert(v, config)
+ return rewriteValuegeneric_OpConvert(v)
case OpCvt32Fto64F:
- return rewriteValuegeneric_OpCvt32Fto64F(v, config)
+ return rewriteValuegeneric_OpCvt32Fto64F(v)
case OpCvt64Fto32F:
- return rewriteValuegeneric_OpCvt64Fto32F(v, config)
+ return rewriteValuegeneric_OpCvt64Fto32F(v)
case OpDiv16:
- return rewriteValuegeneric_OpDiv16(v, config)
+ return rewriteValuegeneric_OpDiv16(v)
case OpDiv16u:
- return rewriteValuegeneric_OpDiv16u(v, config)
+ return rewriteValuegeneric_OpDiv16u(v)
case OpDiv32:
- return rewriteValuegeneric_OpDiv32(v, config)
+ return rewriteValuegeneric_OpDiv32(v)
case OpDiv32F:
- return rewriteValuegeneric_OpDiv32F(v, config)
+ return rewriteValuegeneric_OpDiv32F(v)
case OpDiv32u:
- return rewriteValuegeneric_OpDiv32u(v, config)
+ return rewriteValuegeneric_OpDiv32u(v)
case OpDiv64:
- return rewriteValuegeneric_OpDiv64(v, config)
+ return rewriteValuegeneric_OpDiv64(v)
case OpDiv64F:
- return rewriteValuegeneric_OpDiv64F(v, config)
+ return rewriteValuegeneric_OpDiv64F(v)
case OpDiv64u:
- return rewriteValuegeneric_OpDiv64u(v, config)
+ return rewriteValuegeneric_OpDiv64u(v)
case OpDiv8:
- return rewriteValuegeneric_OpDiv8(v, config)
+ return rewriteValuegeneric_OpDiv8(v)
case OpDiv8u:
- return rewriteValuegeneric_OpDiv8u(v, config)
+ return rewriteValuegeneric_OpDiv8u(v)
case OpEq16:
- return rewriteValuegeneric_OpEq16(v, config)
+ return rewriteValuegeneric_OpEq16(v)
case OpEq32:
- return rewriteValuegeneric_OpEq32(v, config)
+ return rewriteValuegeneric_OpEq32(v)
case OpEq64:
- return rewriteValuegeneric_OpEq64(v, config)
+ return rewriteValuegeneric_OpEq64(v)
case OpEq8:
- return rewriteValuegeneric_OpEq8(v, config)
+ return rewriteValuegeneric_OpEq8(v)
case OpEqB:
- return rewriteValuegeneric_OpEqB(v, config)
+ return rewriteValuegeneric_OpEqB(v)
case OpEqInter:
- return rewriteValuegeneric_OpEqInter(v, config)
+ return rewriteValuegeneric_OpEqInter(v)
case OpEqPtr:
- return rewriteValuegeneric_OpEqPtr(v, config)
+ return rewriteValuegeneric_OpEqPtr(v)
case OpEqSlice:
- return rewriteValuegeneric_OpEqSlice(v, config)
+ return rewriteValuegeneric_OpEqSlice(v)
case OpGeq16:
- return rewriteValuegeneric_OpGeq16(v, config)
+ return rewriteValuegeneric_OpGeq16(v)
case OpGeq16U:
- return rewriteValuegeneric_OpGeq16U(v, config)
+ return rewriteValuegeneric_OpGeq16U(v)
case OpGeq32:
- return rewriteValuegeneric_OpGeq32(v, config)
+ return rewriteValuegeneric_OpGeq32(v)
case OpGeq32U:
- return rewriteValuegeneric_OpGeq32U(v, config)
+ return rewriteValuegeneric_OpGeq32U(v)
case OpGeq64:
- return rewriteValuegeneric_OpGeq64(v, config)
+ return rewriteValuegeneric_OpGeq64(v)
case OpGeq64U:
- return rewriteValuegeneric_OpGeq64U(v, config)
+ return rewriteValuegeneric_OpGeq64U(v)
case OpGeq8:
- return rewriteValuegeneric_OpGeq8(v, config)
+ return rewriteValuegeneric_OpGeq8(v)
case OpGeq8U:
- return rewriteValuegeneric_OpGeq8U(v, config)
+ return rewriteValuegeneric_OpGeq8U(v)
case OpGreater16:
- return rewriteValuegeneric_OpGreater16(v, config)
+ return rewriteValuegeneric_OpGreater16(v)
case OpGreater16U:
- return rewriteValuegeneric_OpGreater16U(v, config)
+ return rewriteValuegeneric_OpGreater16U(v)
case OpGreater32:
- return rewriteValuegeneric_OpGreater32(v, config)
+ return rewriteValuegeneric_OpGreater32(v)
case OpGreater32U:
- return rewriteValuegeneric_OpGreater32U(v, config)
+ return rewriteValuegeneric_OpGreater32U(v)
case OpGreater64:
- return rewriteValuegeneric_OpGreater64(v, config)
+ return rewriteValuegeneric_OpGreater64(v)
case OpGreater64U:
- return rewriteValuegeneric_OpGreater64U(v, config)
+ return rewriteValuegeneric_OpGreater64U(v)
case OpGreater8:
- return rewriteValuegeneric_OpGreater8(v, config)
+ return rewriteValuegeneric_OpGreater8(v)
case OpGreater8U:
- return rewriteValuegeneric_OpGreater8U(v, config)
+ return rewriteValuegeneric_OpGreater8U(v)
case OpIMake:
- return rewriteValuegeneric_OpIMake(v, config)
+ return rewriteValuegeneric_OpIMake(v)
case OpInterCall:
- return rewriteValuegeneric_OpInterCall(v, config)
+ return rewriteValuegeneric_OpInterCall(v)
case OpIsInBounds:
- return rewriteValuegeneric_OpIsInBounds(v, config)
+ return rewriteValuegeneric_OpIsInBounds(v)
case OpIsNonNil:
- return rewriteValuegeneric_OpIsNonNil(v, config)
+ return rewriteValuegeneric_OpIsNonNil(v)
case OpIsSliceInBounds:
- return rewriteValuegeneric_OpIsSliceInBounds(v, config)
+ return rewriteValuegeneric_OpIsSliceInBounds(v)
case OpLeq16:
- return rewriteValuegeneric_OpLeq16(v, config)
+ return rewriteValuegeneric_OpLeq16(v)
case OpLeq16U:
- return rewriteValuegeneric_OpLeq16U(v, config)
+ return rewriteValuegeneric_OpLeq16U(v)
case OpLeq32:
- return rewriteValuegeneric_OpLeq32(v, config)
+ return rewriteValuegeneric_OpLeq32(v)
case OpLeq32U:
- return rewriteValuegeneric_OpLeq32U(v, config)
+ return rewriteValuegeneric_OpLeq32U(v)
case OpLeq64:
- return rewriteValuegeneric_OpLeq64(v, config)
+ return rewriteValuegeneric_OpLeq64(v)
case OpLeq64U:
- return rewriteValuegeneric_OpLeq64U(v, config)
+ return rewriteValuegeneric_OpLeq64U(v)
case OpLeq8:
- return rewriteValuegeneric_OpLeq8(v, config)
+ return rewriteValuegeneric_OpLeq8(v)
case OpLeq8U:
- return rewriteValuegeneric_OpLeq8U(v, config)
+ return rewriteValuegeneric_OpLeq8U(v)
case OpLess16:
- return rewriteValuegeneric_OpLess16(v, config)
+ return rewriteValuegeneric_OpLess16(v)
case OpLess16U:
- return rewriteValuegeneric_OpLess16U(v, config)
+ return rewriteValuegeneric_OpLess16U(v)
case OpLess32:
- return rewriteValuegeneric_OpLess32(v, config)
+ return rewriteValuegeneric_OpLess32(v)
case OpLess32U:
- return rewriteValuegeneric_OpLess32U(v, config)
+ return rewriteValuegeneric_OpLess32U(v)
case OpLess64:
- return rewriteValuegeneric_OpLess64(v, config)
+ return rewriteValuegeneric_OpLess64(v)
case OpLess64U:
- return rewriteValuegeneric_OpLess64U(v, config)
+ return rewriteValuegeneric_OpLess64U(v)
case OpLess8:
- return rewriteValuegeneric_OpLess8(v, config)
+ return rewriteValuegeneric_OpLess8(v)
case OpLess8U:
- return rewriteValuegeneric_OpLess8U(v, config)
+ return rewriteValuegeneric_OpLess8U(v)
case OpLoad:
- return rewriteValuegeneric_OpLoad(v, config)
+ return rewriteValuegeneric_OpLoad(v)
case OpLsh16x16:
- return rewriteValuegeneric_OpLsh16x16(v, config)
+ return rewriteValuegeneric_OpLsh16x16(v)
case OpLsh16x32:
- return rewriteValuegeneric_OpLsh16x32(v, config)
+ return rewriteValuegeneric_OpLsh16x32(v)
case OpLsh16x64:
- return rewriteValuegeneric_OpLsh16x64(v, config)
+ return rewriteValuegeneric_OpLsh16x64(v)
case OpLsh16x8:
- return rewriteValuegeneric_OpLsh16x8(v, config)
+ return rewriteValuegeneric_OpLsh16x8(v)
case OpLsh32x16:
- return rewriteValuegeneric_OpLsh32x16(v, config)
+ return rewriteValuegeneric_OpLsh32x16(v)
case OpLsh32x32:
- return rewriteValuegeneric_OpLsh32x32(v, config)
+ return rewriteValuegeneric_OpLsh32x32(v)
case OpLsh32x64:
- return rewriteValuegeneric_OpLsh32x64(v, config)
+ return rewriteValuegeneric_OpLsh32x64(v)
case OpLsh32x8:
- return rewriteValuegeneric_OpLsh32x8(v, config)
+ return rewriteValuegeneric_OpLsh32x8(v)
case OpLsh64x16:
- return rewriteValuegeneric_OpLsh64x16(v, config)
+ return rewriteValuegeneric_OpLsh64x16(v)
case OpLsh64x32:
- return rewriteValuegeneric_OpLsh64x32(v, config)
+ return rewriteValuegeneric_OpLsh64x32(v)
case OpLsh64x64:
- return rewriteValuegeneric_OpLsh64x64(v, config)
+ return rewriteValuegeneric_OpLsh64x64(v)
case OpLsh64x8:
- return rewriteValuegeneric_OpLsh64x8(v, config)
+ return rewriteValuegeneric_OpLsh64x8(v)
case OpLsh8x16:
- return rewriteValuegeneric_OpLsh8x16(v, config)
+ return rewriteValuegeneric_OpLsh8x16(v)
case OpLsh8x32:
- return rewriteValuegeneric_OpLsh8x32(v, config)
+ return rewriteValuegeneric_OpLsh8x32(v)
case OpLsh8x64:
- return rewriteValuegeneric_OpLsh8x64(v, config)
+ return rewriteValuegeneric_OpLsh8x64(v)
case OpLsh8x8:
- return rewriteValuegeneric_OpLsh8x8(v, config)
+ return rewriteValuegeneric_OpLsh8x8(v)
case OpMod16:
- return rewriteValuegeneric_OpMod16(v, config)
+ return rewriteValuegeneric_OpMod16(v)
case OpMod16u:
- return rewriteValuegeneric_OpMod16u(v, config)
+ return rewriteValuegeneric_OpMod16u(v)
case OpMod32:
- return rewriteValuegeneric_OpMod32(v, config)
+ return rewriteValuegeneric_OpMod32(v)
case OpMod32u:
- return rewriteValuegeneric_OpMod32u(v, config)
+ return rewriteValuegeneric_OpMod32u(v)
case OpMod64:
- return rewriteValuegeneric_OpMod64(v, config)
+ return rewriteValuegeneric_OpMod64(v)
case OpMod64u:
- return rewriteValuegeneric_OpMod64u(v, config)
+ return rewriteValuegeneric_OpMod64u(v)
case OpMod8:
- return rewriteValuegeneric_OpMod8(v, config)
+ return rewriteValuegeneric_OpMod8(v)
case OpMod8u:
- return rewriteValuegeneric_OpMod8u(v, config)
+ return rewriteValuegeneric_OpMod8u(v)
case OpMul16:
- return rewriteValuegeneric_OpMul16(v, config)
+ return rewriteValuegeneric_OpMul16(v)
case OpMul32:
- return rewriteValuegeneric_OpMul32(v, config)
+ return rewriteValuegeneric_OpMul32(v)
case OpMul32F:
- return rewriteValuegeneric_OpMul32F(v, config)
+ return rewriteValuegeneric_OpMul32F(v)
case OpMul64:
- return rewriteValuegeneric_OpMul64(v, config)
+ return rewriteValuegeneric_OpMul64(v)
case OpMul64F:
- return rewriteValuegeneric_OpMul64F(v, config)
+ return rewriteValuegeneric_OpMul64F(v)
case OpMul8:
- return rewriteValuegeneric_OpMul8(v, config)
+ return rewriteValuegeneric_OpMul8(v)
case OpNeg16:
- return rewriteValuegeneric_OpNeg16(v, config)
+ return rewriteValuegeneric_OpNeg16(v)
case OpNeg32:
- return rewriteValuegeneric_OpNeg32(v, config)
+ return rewriteValuegeneric_OpNeg32(v)
case OpNeg32F:
- return rewriteValuegeneric_OpNeg32F(v, config)
+ return rewriteValuegeneric_OpNeg32F(v)
case OpNeg64:
- return rewriteValuegeneric_OpNeg64(v, config)
+ return rewriteValuegeneric_OpNeg64(v)
case OpNeg64F:
- return rewriteValuegeneric_OpNeg64F(v, config)
+ return rewriteValuegeneric_OpNeg64F(v)
case OpNeg8:
- return rewriteValuegeneric_OpNeg8(v, config)
+ return rewriteValuegeneric_OpNeg8(v)
case OpNeq16:
- return rewriteValuegeneric_OpNeq16(v, config)
+ return rewriteValuegeneric_OpNeq16(v)
case OpNeq32:
- return rewriteValuegeneric_OpNeq32(v, config)
+ return rewriteValuegeneric_OpNeq32(v)
case OpNeq64:
- return rewriteValuegeneric_OpNeq64(v, config)
+ return rewriteValuegeneric_OpNeq64(v)
case OpNeq8:
- return rewriteValuegeneric_OpNeq8(v, config)
+ return rewriteValuegeneric_OpNeq8(v)
case OpNeqB:
- return rewriteValuegeneric_OpNeqB(v, config)
+ return rewriteValuegeneric_OpNeqB(v)
case OpNeqInter:
- return rewriteValuegeneric_OpNeqInter(v, config)
+ return rewriteValuegeneric_OpNeqInter(v)
case OpNeqPtr:
- return rewriteValuegeneric_OpNeqPtr(v, config)
+ return rewriteValuegeneric_OpNeqPtr(v)
case OpNeqSlice:
- return rewriteValuegeneric_OpNeqSlice(v, config)
+ return rewriteValuegeneric_OpNeqSlice(v)
case OpNilCheck:
- return rewriteValuegeneric_OpNilCheck(v, config)
+ return rewriteValuegeneric_OpNilCheck(v)
case OpNot:
- return rewriteValuegeneric_OpNot(v, config)
+ return rewriteValuegeneric_OpNot(v)
case OpOffPtr:
- return rewriteValuegeneric_OpOffPtr(v, config)
+ return rewriteValuegeneric_OpOffPtr(v)
case OpOr16:
- return rewriteValuegeneric_OpOr16(v, config)
+ return rewriteValuegeneric_OpOr16(v)
case OpOr32:
- return rewriteValuegeneric_OpOr32(v, config)
+ return rewriteValuegeneric_OpOr32(v)
case OpOr64:
- return rewriteValuegeneric_OpOr64(v, config)
+ return rewriteValuegeneric_OpOr64(v)
case OpOr8:
- return rewriteValuegeneric_OpOr8(v, config)
+ return rewriteValuegeneric_OpOr8(v)
case OpPhi:
- return rewriteValuegeneric_OpPhi(v, config)
+ return rewriteValuegeneric_OpPhi(v)
case OpPtrIndex:
- return rewriteValuegeneric_OpPtrIndex(v, config)
+ return rewriteValuegeneric_OpPtrIndex(v)
case OpRound32F:
- return rewriteValuegeneric_OpRound32F(v, config)
+ return rewriteValuegeneric_OpRound32F(v)
case OpRound64F:
- return rewriteValuegeneric_OpRound64F(v, config)
+ return rewriteValuegeneric_OpRound64F(v)
case OpRsh16Ux16:
- return rewriteValuegeneric_OpRsh16Ux16(v, config)
+ return rewriteValuegeneric_OpRsh16Ux16(v)
case OpRsh16Ux32:
- return rewriteValuegeneric_OpRsh16Ux32(v, config)
+ return rewriteValuegeneric_OpRsh16Ux32(v)
case OpRsh16Ux64:
- return rewriteValuegeneric_OpRsh16Ux64(v, config)
+ return rewriteValuegeneric_OpRsh16Ux64(v)
case OpRsh16Ux8:
- return rewriteValuegeneric_OpRsh16Ux8(v, config)
+ return rewriteValuegeneric_OpRsh16Ux8(v)
case OpRsh16x16:
- return rewriteValuegeneric_OpRsh16x16(v, config)
+ return rewriteValuegeneric_OpRsh16x16(v)
case OpRsh16x32:
- return rewriteValuegeneric_OpRsh16x32(v, config)
+ return rewriteValuegeneric_OpRsh16x32(v)
case OpRsh16x64:
- return rewriteValuegeneric_OpRsh16x64(v, config)
+ return rewriteValuegeneric_OpRsh16x64(v)
case OpRsh16x8:
- return rewriteValuegeneric_OpRsh16x8(v, config)
+ return rewriteValuegeneric_OpRsh16x8(v)
case OpRsh32Ux16:
- return rewriteValuegeneric_OpRsh32Ux16(v, config)
+ return rewriteValuegeneric_OpRsh32Ux16(v)
case OpRsh32Ux32:
- return rewriteValuegeneric_OpRsh32Ux32(v, config)
+ return rewriteValuegeneric_OpRsh32Ux32(v)
case OpRsh32Ux64:
- return rewriteValuegeneric_OpRsh32Ux64(v, config)
+ return rewriteValuegeneric_OpRsh32Ux64(v)
case OpRsh32Ux8:
- return rewriteValuegeneric_OpRsh32Ux8(v, config)
+ return rewriteValuegeneric_OpRsh32Ux8(v)
case OpRsh32x16:
- return rewriteValuegeneric_OpRsh32x16(v, config)
+ return rewriteValuegeneric_OpRsh32x16(v)
case OpRsh32x32:
- return rewriteValuegeneric_OpRsh32x32(v, config)
+ return rewriteValuegeneric_OpRsh32x32(v)
case OpRsh32x64:
- return rewriteValuegeneric_OpRsh32x64(v, config)
+ return rewriteValuegeneric_OpRsh32x64(v)
case OpRsh32x8:
- return rewriteValuegeneric_OpRsh32x8(v, config)
+ return rewriteValuegeneric_OpRsh32x8(v)
case OpRsh64Ux16:
- return rewriteValuegeneric_OpRsh64Ux16(v, config)
+ return rewriteValuegeneric_OpRsh64Ux16(v)
case OpRsh64Ux32:
- return rewriteValuegeneric_OpRsh64Ux32(v, config)
+ return rewriteValuegeneric_OpRsh64Ux32(v)
case OpRsh64Ux64:
- return rewriteValuegeneric_OpRsh64Ux64(v, config)
+ return rewriteValuegeneric_OpRsh64Ux64(v)
case OpRsh64Ux8:
- return rewriteValuegeneric_OpRsh64Ux8(v, config)
+ return rewriteValuegeneric_OpRsh64Ux8(v)
case OpRsh64x16:
- return rewriteValuegeneric_OpRsh64x16(v, config)
+ return rewriteValuegeneric_OpRsh64x16(v)
case OpRsh64x32:
- return rewriteValuegeneric_OpRsh64x32(v, config)
+ return rewriteValuegeneric_OpRsh64x32(v)
case OpRsh64x64:
- return rewriteValuegeneric_OpRsh64x64(v, config)
+ return rewriteValuegeneric_OpRsh64x64(v)
case OpRsh64x8:
- return rewriteValuegeneric_OpRsh64x8(v, config)
+ return rewriteValuegeneric_OpRsh64x8(v)
case OpRsh8Ux16:
- return rewriteValuegeneric_OpRsh8Ux16(v, config)
+ return rewriteValuegeneric_OpRsh8Ux16(v)
case OpRsh8Ux32:
- return rewriteValuegeneric_OpRsh8Ux32(v, config)
+ return rewriteValuegeneric_OpRsh8Ux32(v)
case OpRsh8Ux64:
- return rewriteValuegeneric_OpRsh8Ux64(v, config)
+ return rewriteValuegeneric_OpRsh8Ux64(v)
case OpRsh8Ux8:
- return rewriteValuegeneric_OpRsh8Ux8(v, config)
+ return rewriteValuegeneric_OpRsh8Ux8(v)
case OpRsh8x16:
- return rewriteValuegeneric_OpRsh8x16(v, config)
+ return rewriteValuegeneric_OpRsh8x16(v)
case OpRsh8x32:
- return rewriteValuegeneric_OpRsh8x32(v, config)
+ return rewriteValuegeneric_OpRsh8x32(v)
case OpRsh8x64:
- return rewriteValuegeneric_OpRsh8x64(v, config)
+ return rewriteValuegeneric_OpRsh8x64(v)
case OpRsh8x8:
- return rewriteValuegeneric_OpRsh8x8(v, config)
+ return rewriteValuegeneric_OpRsh8x8(v)
case OpSignExt16to32:
- return rewriteValuegeneric_OpSignExt16to32(v, config)
+ return rewriteValuegeneric_OpSignExt16to32(v)
case OpSignExt16to64:
- return rewriteValuegeneric_OpSignExt16to64(v, config)
+ return rewriteValuegeneric_OpSignExt16to64(v)
case OpSignExt32to64:
- return rewriteValuegeneric_OpSignExt32to64(v, config)
+ return rewriteValuegeneric_OpSignExt32to64(v)
case OpSignExt8to16:
- return rewriteValuegeneric_OpSignExt8to16(v, config)
+ return rewriteValuegeneric_OpSignExt8to16(v)
case OpSignExt8to32:
- return rewriteValuegeneric_OpSignExt8to32(v, config)
+ return rewriteValuegeneric_OpSignExt8to32(v)
case OpSignExt8to64:
- return rewriteValuegeneric_OpSignExt8to64(v, config)
+ return rewriteValuegeneric_OpSignExt8to64(v)
case OpSliceCap:
- return rewriteValuegeneric_OpSliceCap(v, config)
+ return rewriteValuegeneric_OpSliceCap(v)
case OpSliceLen:
- return rewriteValuegeneric_OpSliceLen(v, config)
+ return rewriteValuegeneric_OpSliceLen(v)
case OpSlicePtr:
- return rewriteValuegeneric_OpSlicePtr(v, config)
+ return rewriteValuegeneric_OpSlicePtr(v)
case OpSlicemask:
- return rewriteValuegeneric_OpSlicemask(v, config)
+ return rewriteValuegeneric_OpSlicemask(v)
case OpSqrt:
- return rewriteValuegeneric_OpSqrt(v, config)
+ return rewriteValuegeneric_OpSqrt(v)
case OpStore:
- return rewriteValuegeneric_OpStore(v, config)
+ return rewriteValuegeneric_OpStore(v)
case OpStringLen:
- return rewriteValuegeneric_OpStringLen(v, config)
+ return rewriteValuegeneric_OpStringLen(v)
case OpStringPtr:
- return rewriteValuegeneric_OpStringPtr(v, config)
+ return rewriteValuegeneric_OpStringPtr(v)
case OpStructSelect:
- return rewriteValuegeneric_OpStructSelect(v, config)
+ return rewriteValuegeneric_OpStructSelect(v)
case OpSub16:
- return rewriteValuegeneric_OpSub16(v, config)
+ return rewriteValuegeneric_OpSub16(v)
case OpSub32:
- return rewriteValuegeneric_OpSub32(v, config)
+ return rewriteValuegeneric_OpSub32(v)
case OpSub32F:
- return rewriteValuegeneric_OpSub32F(v, config)
+ return rewriteValuegeneric_OpSub32F(v)
case OpSub64:
- return rewriteValuegeneric_OpSub64(v, config)
+ return rewriteValuegeneric_OpSub64(v)
case OpSub64F:
- return rewriteValuegeneric_OpSub64F(v, config)
+ return rewriteValuegeneric_OpSub64F(v)
case OpSub8:
- return rewriteValuegeneric_OpSub8(v, config)
+ return rewriteValuegeneric_OpSub8(v)
case OpTrunc16to8:
- return rewriteValuegeneric_OpTrunc16to8(v, config)
+ return rewriteValuegeneric_OpTrunc16to8(v)
case OpTrunc32to16:
- return rewriteValuegeneric_OpTrunc32to16(v, config)
+ return rewriteValuegeneric_OpTrunc32to16(v)
case OpTrunc32to8:
- return rewriteValuegeneric_OpTrunc32to8(v, config)
+ return rewriteValuegeneric_OpTrunc32to8(v)
case OpTrunc64to16:
- return rewriteValuegeneric_OpTrunc64to16(v, config)
+ return rewriteValuegeneric_OpTrunc64to16(v)
case OpTrunc64to32:
- return rewriteValuegeneric_OpTrunc64to32(v, config)
+ return rewriteValuegeneric_OpTrunc64to32(v)
case OpTrunc64to8:
- return rewriteValuegeneric_OpTrunc64to8(v, config)
+ return rewriteValuegeneric_OpTrunc64to8(v)
case OpXor16:
- return rewriteValuegeneric_OpXor16(v, config)
+ return rewriteValuegeneric_OpXor16(v)
case OpXor32:
- return rewriteValuegeneric_OpXor32(v, config)
+ return rewriteValuegeneric_OpXor32(v)
case OpXor64:
- return rewriteValuegeneric_OpXor64(v, config)
+ return rewriteValuegeneric_OpXor64(v)
case OpXor8:
- return rewriteValuegeneric_OpXor8(v, config)
+ return rewriteValuegeneric_OpXor8(v)
case OpZero:
- return rewriteValuegeneric_OpZero(v, config)
+ return rewriteValuegeneric_OpZero(v)
case OpZeroExt16to32:
- return rewriteValuegeneric_OpZeroExt16to32(v, config)
+ return rewriteValuegeneric_OpZeroExt16to32(v)
case OpZeroExt16to64:
- return rewriteValuegeneric_OpZeroExt16to64(v, config)
+ return rewriteValuegeneric_OpZeroExt16to64(v)
case OpZeroExt32to64:
- return rewriteValuegeneric_OpZeroExt32to64(v, config)
+ return rewriteValuegeneric_OpZeroExt32to64(v)
case OpZeroExt8to16:
- return rewriteValuegeneric_OpZeroExt8to16(v, config)
+ return rewriteValuegeneric_OpZeroExt8to16(v)
case OpZeroExt8to32:
- return rewriteValuegeneric_OpZeroExt8to32(v, config)
+ return rewriteValuegeneric_OpZeroExt8to32(v)
case OpZeroExt8to64:
- return rewriteValuegeneric_OpZeroExt8to64(v, config)
+ return rewriteValuegeneric_OpZeroExt8to64(v)
}
return false
}
-func rewriteValuegeneric_OpAdd16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpAdd16(v *Value) bool {
b := v.Block
_ = b
// match: (Add16 (Const16 [c]) (Const16 [d]))
}
return false
}
-func rewriteValuegeneric_OpAdd32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpAdd32(v *Value) bool {
b := v.Block
_ = b
// match: (Add32 (Const32 [c]) (Const32 [d]))
}
return false
}
-func rewriteValuegeneric_OpAdd32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpAdd32F(v *Value) bool {
// match: (Add32F (Const32F [c]) (Const32F [d]))
// cond:
// result: (Const32F [f2i(float64(i2f32(c) + i2f32(d)))])
}
return false
}
-func rewriteValuegeneric_OpAdd64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpAdd64(v *Value) bool {
b := v.Block
_ = b
// match: (Add64 (Const64 [c]) (Const64 [d]))
}
return false
}
-func rewriteValuegeneric_OpAdd64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpAdd64F(v *Value) bool {
// match: (Add64F (Const64F [c]) (Const64F [d]))
// cond:
// result: (Const64F [f2i(i2f(c) + i2f(d))])
}
return false
}
-func rewriteValuegeneric_OpAdd8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpAdd8(v *Value) bool {
b := v.Block
_ = b
// match: (Add8 (Const8 [c]) (Const8 [d]))
}
return false
}
-func rewriteValuegeneric_OpAddPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpAddPtr(v *Value) bool {
// match: (AddPtr <t> x (Const64 [c]))
// cond:
// result: (OffPtr <t> x [c])
}
return false
}
-func rewriteValuegeneric_OpAnd16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpAnd16(v *Value) bool {
b := v.Block
_ = b
// match: (And16 (Const16 [c]) (Const16 [d]))
}
return false
}
-func rewriteValuegeneric_OpAnd32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpAnd32(v *Value) bool {
b := v.Block
_ = b
// match: (And32 (Const32 [c]) (Const32 [d]))
}
return false
}
-func rewriteValuegeneric_OpAnd64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpAnd64(v *Value) bool {
b := v.Block
_ = b
// match: (And64 (Const64 [c]) (Const64 [d]))
}
return false
}
-func rewriteValuegeneric_OpAnd8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpAnd8(v *Value) bool {
b := v.Block
_ = b
// match: (And8 (Const8 [c]) (Const8 [d]))
}
return false
}
-func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpArg(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Arg {n} [off])
// cond: v.Type.IsString()
- // result: (StringMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]))
+ // result: (StringMake (Arg <fe.TypeBytePtr()> {n} [off]) (Arg <fe.TypeInt()> {n} [off+config.PtrSize]))
for {
off := v.AuxInt
n := v.Aux
break
}
v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpArg, config.fe.TypeBytePtr())
+ v0 := b.NewValue0(v.Pos, OpArg, fe.TypeBytePtr())
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, config.fe.TypeInt())
+ v1 := b.NewValue0(v.Pos, OpArg, fe.TypeInt())
v1.AuxInt = off + config.PtrSize
v1.Aux = n
v.AddArg(v1)
}
// match: (Arg {n} [off])
// cond: v.Type.IsSlice()
- // result: (SliceMake (Arg <v.Type.ElemType().PtrTo()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]) (Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize]))
+ // result: (SliceMake (Arg <v.Type.ElemType().PtrTo()> {n} [off]) (Arg <fe.TypeInt()> {n} [off+config.PtrSize]) (Arg <fe.TypeInt()> {n} [off+2*config.PtrSize]))
for {
off := v.AuxInt
n := v.Aux
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, config.fe.TypeInt())
+ v1 := b.NewValue0(v.Pos, OpArg, fe.TypeInt())
v1.AuxInt = off + config.PtrSize
v1.Aux = n
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpArg, config.fe.TypeInt())
+ v2 := b.NewValue0(v.Pos, OpArg, fe.TypeInt())
v2.AuxInt = off + 2*config.PtrSize
v2.Aux = n
v.AddArg(v2)
}
// match: (Arg {n} [off])
// cond: v.Type.IsInterface()
- // result: (IMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeBytePtr()> {n} [off+config.PtrSize]))
+ // result: (IMake (Arg <fe.TypeBytePtr()> {n} [off]) (Arg <fe.TypeBytePtr()> {n} [off+config.PtrSize]))
for {
off := v.AuxInt
n := v.Aux
break
}
v.reset(OpIMake)
- v0 := b.NewValue0(v.Pos, OpArg, config.fe.TypeBytePtr())
+ v0 := b.NewValue0(v.Pos, OpArg, fe.TypeBytePtr())
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, config.fe.TypeBytePtr())
+ v1 := b.NewValue0(v.Pos, OpArg, fe.TypeBytePtr())
v1.AuxInt = off + config.PtrSize
v1.Aux = n
v.AddArg(v1)
}
// match: (Arg {n} [off])
// cond: v.Type.IsComplex() && v.Type.Size() == 16
- // result: (ComplexMake (Arg <config.fe.TypeFloat64()> {n} [off]) (Arg <config.fe.TypeFloat64()> {n} [off+8]))
+ // result: (ComplexMake (Arg <fe.TypeFloat64()> {n} [off]) (Arg <fe.TypeFloat64()> {n} [off+8]))
for {
off := v.AuxInt
n := v.Aux
break
}
v.reset(OpComplexMake)
- v0 := b.NewValue0(v.Pos, OpArg, config.fe.TypeFloat64())
+ v0 := b.NewValue0(v.Pos, OpArg, fe.TypeFloat64())
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, config.fe.TypeFloat64())
+ v1 := b.NewValue0(v.Pos, OpArg, fe.TypeFloat64())
v1.AuxInt = off + 8
v1.Aux = n
v.AddArg(v1)
}
// match: (Arg {n} [off])
// cond: v.Type.IsComplex() && v.Type.Size() == 8
- // result: (ComplexMake (Arg <config.fe.TypeFloat32()> {n} [off]) (Arg <config.fe.TypeFloat32()> {n} [off+4]))
+ // result: (ComplexMake (Arg <fe.TypeFloat32()> {n} [off]) (Arg <fe.TypeFloat32()> {n} [off+4]))
for {
off := v.AuxInt
n := v.Aux
break
}
v.reset(OpComplexMake)
- v0 := b.NewValue0(v.Pos, OpArg, config.fe.TypeFloat32())
+ v0 := b.NewValue0(v.Pos, OpArg, fe.TypeFloat32())
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpArg, config.fe.TypeFloat32())
+ v1 := b.NewValue0(v.Pos, OpArg, fe.TypeFloat32())
v1.AuxInt = off + 4
v1.Aux = n
v.AddArg(v1)
return true
}
// match: (Arg <t>)
- // cond: t.IsStruct() && t.NumFields() == 0 && config.fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)
// result: (StructMake0)
for {
t := v.Type
- if !(t.IsStruct() && t.NumFields() == 0 && config.fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) {
break
}
v.reset(OpStructMake0)
return true
}
// match: (Arg <t> {n} [off])
- // cond: t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)
// result: (StructMake1 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]))
for {
t := v.Type
off := v.AuxInt
n := v.Aux
- if !(t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) {
break
}
v.reset(OpStructMake1)
return true
}
// match: (Arg <t> {n} [off])
- // cond: t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)
// result: (StructMake2 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]))
for {
t := v.Type
off := v.AuxInt
n := v.Aux
- if !(t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) {
break
}
v.reset(OpStructMake2)
return true
}
// match: (Arg <t> {n} [off])
- // cond: t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)
// result: (StructMake3 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]) (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]))
for {
t := v.Type
off := v.AuxInt
n := v.Aux
- if !(t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) {
break
}
v.reset(OpStructMake3)
return true
}
// match: (Arg <t> {n} [off])
- // cond: t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)
// result: (StructMake4 (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]) (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]) (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]) (Arg <t.FieldType(3)> {n} [off+t.FieldOff(3)]))
for {
t := v.Type
off := v.AuxInt
n := v.Aux
- if !(t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) {
break
}
v.reset(OpStructMake4)
return true
}
// match: (Arg <t> {n} [off])
- // cond: t.IsArray() && t.NumElem() == 1 && config.fe.CanSSA(t)
+ // cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)
// result: (ArrayMake1 (Arg <t.ElemType()> {n} [off]))
for {
t := v.Type
off := v.AuxInt
n := v.Aux
- if !(t.IsArray() && t.NumElem() == 1 && config.fe.CanSSA(t)) {
+ if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) {
break
}
v.reset(OpArrayMake1)
}
return false
}
-func rewriteValuegeneric_OpArraySelect(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpArraySelect(v *Value) bool {
// match: (ArraySelect (ArrayMake1 x))
// cond:
// result: x
}
return false
}
-func rewriteValuegeneric_OpCom16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpCom16(v *Value) bool {
// match: (Com16 (Com16 x))
// cond:
// result: x
}
return false
}
-func rewriteValuegeneric_OpCom32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpCom32(v *Value) bool {
// match: (Com32 (Com32 x))
// cond:
// result: x
}
return false
}
-func rewriteValuegeneric_OpCom64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpCom64(v *Value) bool {
// match: (Com64 (Com64 x))
// cond:
// result: x
}
return false
}
-func rewriteValuegeneric_OpCom8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpCom8(v *Value) bool {
// match: (Com8 (Com8 x))
// cond:
// result: x
}
return false
}
-func rewriteValuegeneric_OpConstInterface(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpConstInterface(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (ConstInterface)
// cond:
- // result: (IMake (ConstNil <config.fe.TypeBytePtr()>) (ConstNil <config.fe.TypeBytePtr()>))
+ // result: (IMake (ConstNil <fe.TypeBytePtr()>) (ConstNil <fe.TypeBytePtr()>))
for {
v.reset(OpIMake)
- v0 := b.NewValue0(v.Pos, OpConstNil, config.fe.TypeBytePtr())
+ v0 := b.NewValue0(v.Pos, OpConstNil, fe.TypeBytePtr())
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConstNil, config.fe.TypeBytePtr())
+ v1 := b.NewValue0(v.Pos, OpConstNil, fe.TypeBytePtr())
v.AddArg(v1)
return true
}
}
-func rewriteValuegeneric_OpConstSlice(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpConstSlice(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (ConstSlice)
// cond: config.PtrSize == 4
- // result: (SliceMake (ConstNil <v.Type.ElemType().PtrTo()>) (Const32 <config.fe.TypeInt()> [0]) (Const32 <config.fe.TypeInt()> [0]))
+ // result: (SliceMake (ConstNil <v.Type.ElemType().PtrTo()>) (Const32 <fe.TypeInt()> [0]) (Const32 <fe.TypeInt()> [0]))
for {
if !(config.PtrSize == 4) {
break
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.ElemType().PtrTo())
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeInt())
+ v1 := b.NewValue0(v.Pos, OpConst32, fe.TypeInt())
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeInt())
+ v2 := b.NewValue0(v.Pos, OpConst32, fe.TypeInt())
v2.AuxInt = 0
v.AddArg(v2)
return true
}
// match: (ConstSlice)
// cond: config.PtrSize == 8
- // result: (SliceMake (ConstNil <v.Type.ElemType().PtrTo()>) (Const64 <config.fe.TypeInt()> [0]) (Const64 <config.fe.TypeInt()> [0]))
+ // result: (SliceMake (ConstNil <v.Type.ElemType().PtrTo()>) (Const64 <fe.TypeInt()> [0]) (Const64 <fe.TypeInt()> [0]))
for {
if !(config.PtrSize == 8) {
break
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.ElemType().PtrTo())
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeInt())
+ v1 := b.NewValue0(v.Pos, OpConst64, fe.TypeInt())
v1.AuxInt = 0
v.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeInt())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeInt())
v2.AuxInt = 0
v.AddArg(v2)
return true
}
return false
}
-func rewriteValuegeneric_OpConstString(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpConstString(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (ConstString {s})
// cond: config.PtrSize == 4 && s.(string) == ""
- // result: (StringMake (ConstNil) (Const32 <config.fe.TypeInt()> [0]))
+ // result: (StringMake (ConstNil) (Const32 <fe.TypeInt()> [0]))
for {
s := v.Aux
if !(config.PtrSize == 4 && s.(string) == "") {
break
}
v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpConstNil, config.fe.TypeBytePtr())
+ v0 := b.NewValue0(v.Pos, OpConstNil, fe.TypeBytePtr())
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeInt())
+ v1 := b.NewValue0(v.Pos, OpConst32, fe.TypeInt())
v1.AuxInt = 0
v.AddArg(v1)
return true
}
// match: (ConstString {s})
// cond: config.PtrSize == 8 && s.(string) == ""
- // result: (StringMake (ConstNil) (Const64 <config.fe.TypeInt()> [0]))
+ // result: (StringMake (ConstNil) (Const64 <fe.TypeInt()> [0]))
for {
s := v.Aux
if !(config.PtrSize == 8 && s.(string) == "") {
break
}
v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpConstNil, config.fe.TypeBytePtr())
+ v0 := b.NewValue0(v.Pos, OpConstNil, fe.TypeBytePtr())
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeInt())
+ v1 := b.NewValue0(v.Pos, OpConst64, fe.TypeInt())
v1.AuxInt = 0
v.AddArg(v1)
return true
}
// match: (ConstString {s})
// cond: config.PtrSize == 4 && s.(string) != ""
- // result: (StringMake (Addr <config.fe.TypeBytePtr()> {config.fe.StringData(s.(string))} (SB)) (Const32 <config.fe.TypeInt()> [int64(len(s.(string)))]))
+ // result: (StringMake (Addr <fe.TypeBytePtr()> {fe.StringData(s.(string))} (SB)) (Const32 <fe.TypeInt()> [int64(len(s.(string)))]))
for {
s := v.Aux
if !(config.PtrSize == 4 && s.(string) != "") {
break
}
v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpAddr, config.fe.TypeBytePtr())
- v0.Aux = config.fe.StringData(s.(string))
- v1 := b.NewValue0(v.Pos, OpSB, config.fe.TypeUintptr())
+ v0 := b.NewValue0(v.Pos, OpAddr, fe.TypeBytePtr())
+ v0.Aux = fe.StringData(s.(string))
+ v1 := b.NewValue0(v.Pos, OpSB, fe.TypeUintptr())
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeInt())
+ v2 := b.NewValue0(v.Pos, OpConst32, fe.TypeInt())
v2.AuxInt = int64(len(s.(string)))
v.AddArg(v2)
return true
}
// match: (ConstString {s})
// cond: config.PtrSize == 8 && s.(string) != ""
- // result: (StringMake (Addr <config.fe.TypeBytePtr()> {config.fe.StringData(s.(string))} (SB)) (Const64 <config.fe.TypeInt()> [int64(len(s.(string)))]))
+ // result: (StringMake (Addr <fe.TypeBytePtr()> {fe.StringData(s.(string))} (SB)) (Const64 <fe.TypeInt()> [int64(len(s.(string)))]))
for {
s := v.Aux
if !(config.PtrSize == 8 && s.(string) != "") {
break
}
v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpAddr, config.fe.TypeBytePtr())
- v0.Aux = config.fe.StringData(s.(string))
- v1 := b.NewValue0(v.Pos, OpSB, config.fe.TypeUintptr())
+ v0 := b.NewValue0(v.Pos, OpAddr, fe.TypeBytePtr())
+ v0.Aux = fe.StringData(s.(string))
+ v1 := b.NewValue0(v.Pos, OpSB, fe.TypeUintptr())
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeInt())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeInt())
v2.AuxInt = int64(len(s.(string)))
v.AddArg(v2)
return true
}
return false
}
-func rewriteValuegeneric_OpConvert(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpConvert(v *Value) bool {
// match: (Convert (Add64 (Convert ptr mem) off) mem)
// cond:
// result: (Add64 ptr off)
}
return false
}
-func rewriteValuegeneric_OpCvt32Fto64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpCvt32Fto64F(v *Value) bool {
// match: (Cvt32Fto64F (Const32F [c]))
// cond:
// result: (Const64F [c])
}
return false
}
-func rewriteValuegeneric_OpCvt64Fto32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpCvt64Fto32F(v *Value) bool {
// match: (Cvt64Fto32F (Const64F [c]))
// cond:
// result: (Const32F [f2i(float64(i2f32(c)))])
}
return false
}
-func rewriteValuegeneric_OpDiv16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpDiv16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16 (Const16 [c]) (Const16 [d]))
// cond: d != 0
// result: (Const16 [int64(int16(c)/int16(d))])
}
// match: (Div16 <t> x (Const16 [-1<<15]))
// cond:
- // result: (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <config.fe.TypeUInt64()> [15]))
+ // result: (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <fe.TypeUInt64()> [15]))
for {
t := v.Type
x := v.Args[0]
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 15
v.AddArg(v2)
return true
}
// match: (Div16 <t> n (Const16 [c]))
// cond: isPowerOfTwo(c)
- // result: (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <config.fe.TypeUInt64()> [15])) (Const64 <config.fe.TypeUInt64()> [16-log2(c)]))) (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ // result: (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <fe.TypeUInt64()> [15])) (Const64 <fe.TypeUInt64()> [16-log2(c)]))) (Const64 <fe.TypeUInt64()> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
v1 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
v2 := b.NewValue0(v.Pos, OpRsh16x64, t)
v2.AddArg(n)
- v3 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v3.AuxInt = 15
v2.AddArg(v3)
v1.AddArg(v2)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 16 - log2(c)
v1.AddArg(v4)
v0.AddArg(v1)
v.AddArg(v0)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = log2(c)
v.AddArg(v5)
return true
}
// match: (Div16 <t> x (Const16 [c]))
// cond: smagicOK(16,c)
- // result: (Sub16 <t> (Rsh32x64 <t> (Mul32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [int64(smagic(16,c).m)]) (SignExt16to32 x)) (Const64 <config.fe.TypeUInt64()> [16+smagic(16,c).s])) (Rsh32x64 <t> (SignExt16to32 x) (Const64 <config.fe.TypeUInt64()> [31])))
+ // result: (Sub16 <t> (Rsh32x64 <t> (Mul32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [int64(smagic(16,c).m)]) (SignExt16to32 x)) (Const64 <fe.TypeUInt64()> [16+smagic(16,c).s])) (Rsh32x64 <t> (SignExt16to32 x) (Const64 <fe.TypeUInt64()> [31])))
for {
t := v.Type
x := v.Args[0]
v.reset(OpSub16)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
- v1 := b.NewValue0(v.Pos, OpMul32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMul32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v2.AuxInt = int64(smagic(16, c).m)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v3 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 16 + smagic(16, c).s
v0.AddArg(v4)
v.AddArg(v0)
v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
- v6 := b.NewValue0(v.Pos, OpSignExt16to32, config.fe.TypeInt32())
+ v6 := b.NewValue0(v.Pos, OpSignExt16to32, fe.TypeInt32())
v6.AddArg(x)
v5.AddArg(v6)
- v7 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v7 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v7.AuxInt = 31
v5.AddArg(v7)
v.AddArg(v5)
}
return false
}
-func rewriteValuegeneric_OpDiv16u(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpDiv16u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div16u (Const16 [c]) (Const16 [d]))
// cond: d != 0
// result: (Const16 [int64(int16(uint16(c)/uint16(d)))])
}
// match: (Div16u n (Const16 [c]))
// cond: isPowerOfTwo(c&0xffff)
- // result: (Rsh16Ux64 n (Const64 <config.fe.TypeUInt64()> [log2(c&0xffff)]))
+ // result: (Rsh16Ux64 n (Const64 <fe.TypeUInt64()> [log2(c&0xffff)]))
for {
n := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpRsh16Ux64)
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = log2(c & 0xffff)
v.AddArg(v0)
return true
}
// match: (Div16u x (Const16 [c]))
// cond: umagicOK(16, c) && config.RegSize == 8
- // result: (Trunc64to16 (Rsh64Ux64 <config.fe.TypeUInt64()> (Mul64 <config.fe.TypeUInt64()> (Const64 <config.fe.TypeUInt64()> [int64(1<<16+umagic(16,c).m)]) (ZeroExt16to64 x)) (Const64 <config.fe.TypeUInt64()> [16+umagic(16,c).s])))
+ // result: (Trunc64to16 (Rsh64Ux64 <fe.TypeUInt64()> (Mul64 <fe.TypeUInt64()> (Const64 <fe.TypeUInt64()> [int64(1<<16+umagic(16,c).m)]) (ZeroExt16to64 x)) (Const64 <fe.TypeUInt64()> [16+umagic(16,c).s])))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpTrunc64to16)
- v0 := b.NewValue0(v.Pos, OpRsh64Ux64, config.fe.TypeUInt64())
- v1 := b.NewValue0(v.Pos, OpMul64, config.fe.TypeUInt64())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMul64, fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = int64(1<<16 + umagic(16, c).m)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, fe.TypeUInt64())
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 16 + umagic(16, c).s
v0.AddArg(v4)
v.AddArg(v0)
}
// match: (Div16u x (Const16 [c]))
// cond: umagicOK(16, c) && config.RegSize == 4 && umagic(16,c).m&1 == 0
- // result: (Trunc32to16 (Rsh32Ux64 <config.fe.TypeUInt32()> (Mul32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [int64(1<<15+umagic(16,c).m/2)]) (ZeroExt16to32 x)) (Const64 <config.fe.TypeUInt64()> [16+umagic(16,c).s-1])))
+ // result: (Trunc32to16 (Rsh32Ux64 <fe.TypeUInt32()> (Mul32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [int64(1<<15+umagic(16,c).m/2)]) (ZeroExt16to32 x)) (Const64 <fe.TypeUInt64()> [16+umagic(16,c).s-1])))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpTrunc32to16)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux64, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpMul32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMul32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v2.AuxInt = int64(1<<15 + umagic(16, c).m/2)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 16 + umagic(16, c).s - 1
v0.AddArg(v4)
v.AddArg(v0)
}
// match: (Div16u x (Const16 [c]))
// cond: umagicOK(16, c) && config.RegSize == 4 && c&1 == 0
- // result: (Trunc32to16 (Rsh32Ux64 <config.fe.TypeUInt32()> (Mul32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [int64(1<<15+(umagic(16,c).m+1)/2)]) (Rsh32Ux64 <config.fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <config.fe.TypeUInt64()> [1]))) (Const64 <config.fe.TypeUInt64()> [16+umagic(16,c).s-2])))
+ // result: (Trunc32to16 (Rsh32Ux64 <fe.TypeUInt32()> (Mul32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [int64(1<<15+(umagic(16,c).m+1)/2)]) (Rsh32Ux64 <fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <fe.TypeUInt64()> [1]))) (Const64 <fe.TypeUInt64()> [16+umagic(16,c).s-2])))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpTrunc32to16)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux64, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpMul32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMul32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v2.AuxInt = int64(1<<15 + (umagic(16, c).m+1)/2)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpRsh32Ux64, config.fe.TypeUInt32())
- v4 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux64, fe.TypeUInt32())
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 1
v3.AddArg(v5)
v1.AddArg(v3)
v0.AddArg(v1)
- v6 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v6.AuxInt = 16 + umagic(16, c).s - 2
v0.AddArg(v6)
v.AddArg(v0)
}
// match: (Div16u x (Const16 [c]))
// cond: umagicOK(16, c) && config.RegSize == 4
- // result: (Trunc32to16 (Rsh32Ux64 <config.fe.TypeUInt32()> (Avg32u (Lsh32x64 <config.fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <config.fe.TypeUInt64()> [16])) (Mul32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [int64(umagic(16,c).m)]) (ZeroExt16to32 x))) (Const64 <config.fe.TypeUInt64()> [16+umagic(16,c).s-1])))
+ // result: (Trunc32to16 (Rsh32Ux64 <fe.TypeUInt32()> (Avg32u (Lsh32x64 <fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <fe.TypeUInt64()> [16])) (Mul32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [int64(umagic(16,c).m)]) (ZeroExt16to32 x))) (Const64 <fe.TypeUInt64()> [16+umagic(16,c).s-1])))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpTrunc32to16)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux64, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpAvg32u, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpLsh32x64, config.fe.TypeUInt32())
- v3 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpAvg32u, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpLsh32x64, fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 16
v2.AddArg(v4)
v1.AddArg(v2)
- v5 := b.NewValue0(v.Pos, OpMul32, config.fe.TypeUInt32())
- v6 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpMul32, fe.TypeUInt32())
+ v6 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v6.AuxInt = int64(umagic(16, c).m)
v5.AddArg(v6)
- v7 := b.NewValue0(v.Pos, OpZeroExt16to32, config.fe.TypeUInt32())
+ v7 := b.NewValue0(v.Pos, OpZeroExt16to32, fe.TypeUInt32())
v7.AddArg(x)
v5.AddArg(v7)
v1.AddArg(v5)
v0.AddArg(v1)
- v8 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v8 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v8.AuxInt = 16 + umagic(16, c).s - 1
v0.AddArg(v8)
v.AddArg(v0)
}
return false
}
-func rewriteValuegeneric_OpDiv32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpDiv32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div32 (Const32 [c]) (Const32 [d]))
// cond: d != 0
// result: (Const32 [int64(int32(c)/int32(d))])
}
// match: (Div32 <t> x (Const32 [-1<<31]))
// cond:
- // result: (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <config.fe.TypeUInt64()> [31]))
+ // result: (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <fe.TypeUInt64()> [31]))
for {
t := v.Type
x := v.Args[0]
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 31
v.AddArg(v2)
return true
}
// match: (Div32 <t> n (Const32 [c]))
// cond: isPowerOfTwo(c)
- // result: (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <config.fe.TypeUInt64()> [31])) (Const64 <config.fe.TypeUInt64()> [32-log2(c)]))) (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ // result: (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <fe.TypeUInt64()> [31])) (Const64 <fe.TypeUInt64()> [32-log2(c)]))) (Const64 <fe.TypeUInt64()> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
v1 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
v2 := b.NewValue0(v.Pos, OpRsh32x64, t)
v2.AddArg(n)
- v3 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v3.AuxInt = 31
v2.AddArg(v3)
v1.AddArg(v2)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 32 - log2(c)
v1.AddArg(v4)
v0.AddArg(v1)
v.AddArg(v0)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = log2(c)
v.AddArg(v5)
return true
}
// match: (Div32 <t> x (Const32 [c]))
// cond: smagicOK(32,c) && config.RegSize == 8
- // result: (Sub32 <t> (Rsh64x64 <t> (Mul64 <config.fe.TypeUInt64()> (Const64 <config.fe.TypeUInt64()> [int64(smagic(32,c).m)]) (SignExt32to64 x)) (Const64 <config.fe.TypeUInt64()> [32+smagic(32,c).s])) (Rsh64x64 <t> (SignExt32to64 x) (Const64 <config.fe.TypeUInt64()> [63])))
+ // result: (Sub32 <t> (Rsh64x64 <t> (Mul64 <fe.TypeUInt64()> (Const64 <fe.TypeUInt64()> [int64(smagic(32,c).m)]) (SignExt32to64 x)) (Const64 <fe.TypeUInt64()> [32+smagic(32,c).s])) (Rsh64x64 <t> (SignExt32to64 x) (Const64 <fe.TypeUInt64()> [63])))
for {
t := v.Type
x := v.Args[0]
v.reset(OpSub32)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
- v1 := b.NewValue0(v.Pos, OpMul64, config.fe.TypeUInt64())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMul64, fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = int64(smagic(32, c).m)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 32 + smagic(32, c).s
v0.AddArg(v4)
v.AddArg(v0)
v5 := b.NewValue0(v.Pos, OpRsh64x64, t)
- v6 := b.NewValue0(v.Pos, OpSignExt32to64, config.fe.TypeInt64())
+ v6 := b.NewValue0(v.Pos, OpSignExt32to64, fe.TypeInt64())
v6.AddArg(x)
v5.AddArg(v6)
- v7 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v7 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v7.AuxInt = 63
v5.AddArg(v7)
v.AddArg(v5)
}
// match: (Div32 <t> x (Const32 [c]))
// cond: smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 == 0
- // result: (Sub32 <t> (Rsh32x64 <t> (Hmul32 <t> (Const32 <config.fe.TypeUInt32()> [int64(int32(smagic(32,c).m/2))]) x) (Const64 <config.fe.TypeUInt64()> [smagic(32,c).s-1])) (Rsh32x64 <t> x (Const64 <config.fe.TypeUInt64()> [31])))
+ // result: (Sub32 <t> (Rsh32x64 <t> (Hmul32 <t> (Const32 <fe.TypeUInt32()> [int64(int32(smagic(32,c).m/2))]) x) (Const64 <fe.TypeUInt64()> [smagic(32,c).s-1])) (Rsh32x64 <t> x (Const64 <fe.TypeUInt64()> [31])))
for {
t := v.Type
x := v.Args[0]
v.Type = t
v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
v1 := b.NewValue0(v.Pos, OpHmul32, t)
- v2 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v2.AuxInt = int64(int32(smagic(32, c).m / 2))
v1.AddArg(v2)
v1.AddArg(x)
v0.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v3.AuxInt = smagic(32, c).s - 1
v0.AddArg(v3)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpRsh32x64, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 31
v4.AddArg(v5)
v.AddArg(v4)
}
// match: (Div32 <t> x (Const32 [c]))
// cond: smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 != 0
- // result: (Sub32 <t> (Rsh32x64 <t> (Add32 <t> (Hmul32 <t> (Const32 <config.fe.TypeUInt32()> [int64(int32(smagic(32,c).m))]) x) x) (Const64 <config.fe.TypeUInt64()> [smagic(32,c).s])) (Rsh32x64 <t> x (Const64 <config.fe.TypeUInt64()> [31])))
+ // result: (Sub32 <t> (Rsh32x64 <t> (Add32 <t> (Hmul32 <t> (Const32 <fe.TypeUInt32()> [int64(int32(smagic(32,c).m))]) x) x) (Const64 <fe.TypeUInt64()> [smagic(32,c).s])) (Rsh32x64 <t> x (Const64 <fe.TypeUInt64()> [31])))
for {
t := v.Type
x := v.Args[0]
v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
v1 := b.NewValue0(v.Pos, OpAdd32, t)
v2 := b.NewValue0(v.Pos, OpHmul32, t)
- v3 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v3.AuxInt = int64(int32(smagic(32, c).m))
v2.AddArg(v3)
v2.AddArg(x)
v1.AddArg(v2)
v1.AddArg(x)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = smagic(32, c).s
v0.AddArg(v4)
v.AddArg(v0)
v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
v5.AddArg(x)
- v6 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v6.AuxInt = 31
v5.AddArg(v6)
v.AddArg(v5)
}
return false
}
-func rewriteValuegeneric_OpDiv32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpDiv32F(v *Value) bool {
// match: (Div32F (Const32F [c]) (Const32F [d]))
// cond:
// result: (Const32F [f2i(float64(i2f32(c) / i2f32(d)))])
}
return false
}
-func rewriteValuegeneric_OpDiv32u(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpDiv32u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div32u (Const32 [c]) (Const32 [d]))
// cond: d != 0
// result: (Const32 [int64(int32(uint32(c)/uint32(d)))])
}
// match: (Div32u n (Const32 [c]))
// cond: isPowerOfTwo(c&0xffffffff)
- // result: (Rsh32Ux64 n (Const64 <config.fe.TypeUInt64()> [log2(c&0xffffffff)]))
+ // result: (Rsh32Ux64 n (Const64 <fe.TypeUInt64()> [log2(c&0xffffffff)]))
for {
n := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpRsh32Ux64)
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = log2(c & 0xffffffff)
v.AddArg(v0)
return true
}
// match: (Div32u x (Const32 [c]))
// cond: umagicOK(32, c) && config.RegSize == 4 && umagic(32,c).m&1 == 0
- // result: (Rsh32Ux64 <config.fe.TypeUInt32()> (Hmul32u <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [int64(int32(1<<31+umagic(32,c).m/2))]) x) (Const64 <config.fe.TypeUInt64()> [umagic(32,c).s-1]))
+ // result: (Rsh32Ux64 <fe.TypeUInt32()> (Hmul32u <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [int64(int32(1<<31+umagic(32,c).m/2))]) x) (Const64 <fe.TypeUInt64()> [umagic(32,c).s-1]))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpRsh32Ux64)
- v.Type = config.fe.TypeUInt32()
- v0 := b.NewValue0(v.Pos, OpHmul32u, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v.Type = fe.TypeUInt32()
+ v0 := b.NewValue0(v.Pos, OpHmul32u, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v1.AuxInt = int64(int32(1<<31 + umagic(32, c).m/2))
v0.AddArg(v1)
v0.AddArg(x)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = umagic(32, c).s - 1
v.AddArg(v2)
return true
}
// match: (Div32u x (Const32 [c]))
// cond: umagicOK(32, c) && config.RegSize == 4 && c&1 == 0
- // result: (Rsh32Ux64 <config.fe.TypeUInt32()> (Hmul32u <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [int64(int32(1<<31+(umagic(32,c).m+1)/2))]) (Rsh32Ux64 <config.fe.TypeUInt32()> x (Const64 <config.fe.TypeUInt64()> [1]))) (Const64 <config.fe.TypeUInt64()> [umagic(32,c).s-2]))
+ // result: (Rsh32Ux64 <fe.TypeUInt32()> (Hmul32u <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [int64(int32(1<<31+(umagic(32,c).m+1)/2))]) (Rsh32Ux64 <fe.TypeUInt32()> x (Const64 <fe.TypeUInt64()> [1]))) (Const64 <fe.TypeUInt64()> [umagic(32,c).s-2]))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpRsh32Ux64)
- v.Type = config.fe.TypeUInt32()
- v0 := b.NewValue0(v.Pos, OpHmul32u, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v.Type = fe.TypeUInt32()
+ v0 := b.NewValue0(v.Pos, OpHmul32u, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v1.AuxInt = int64(int32(1<<31 + (umagic(32, c).m+1)/2))
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpRsh32Ux64, config.fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, fe.TypeUInt32())
v2.AddArg(x)
- v3 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v3.AuxInt = 1
v2.AddArg(v3)
v0.AddArg(v2)
v.AddArg(v0)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = umagic(32, c).s - 2
v.AddArg(v4)
return true
}
// match: (Div32u x (Const32 [c]))
// cond: umagicOK(32, c) && config.RegSize == 4
- // result: (Rsh32Ux64 <config.fe.TypeUInt32()> (Avg32u x (Hmul32u <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [int64(int32(umagic(32,c).m))]) x)) (Const64 <config.fe.TypeUInt64()> [umagic(32,c).s-1]))
+ // result: (Rsh32Ux64 <fe.TypeUInt32()> (Avg32u x (Hmul32u <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [int64(int32(umagic(32,c).m))]) x)) (Const64 <fe.TypeUInt64()> [umagic(32,c).s-1]))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpRsh32Ux64)
- v.Type = config.fe.TypeUInt32()
- v0 := b.NewValue0(v.Pos, OpAvg32u, config.fe.TypeUInt32())
+ v.Type = fe.TypeUInt32()
+ v0 := b.NewValue0(v.Pos, OpAvg32u, fe.TypeUInt32())
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpHmul32u, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpHmul32u, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v2.AuxInt = int64(int32(umagic(32, c).m))
v1.AddArg(v2)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v3.AuxInt = umagic(32, c).s - 1
v.AddArg(v3)
return true
}
// match: (Div32u x (Const32 [c]))
// cond: umagicOK(32, c) && config.RegSize == 8 && umagic(32,c).m&1 == 0
- // result: (Trunc64to32 (Rsh64Ux64 <config.fe.TypeUInt64()> (Mul64 <config.fe.TypeUInt64()> (Const64 <config.fe.TypeUInt64()> [int64(1<<31+umagic(32,c).m/2)]) (ZeroExt32to64 x)) (Const64 <config.fe.TypeUInt64()> [32+umagic(32,c).s-1])))
+ // result: (Trunc64to32 (Rsh64Ux64 <fe.TypeUInt64()> (Mul64 <fe.TypeUInt64()> (Const64 <fe.TypeUInt64()> [int64(1<<31+umagic(32,c).m/2)]) (ZeroExt32to64 x)) (Const64 <fe.TypeUInt64()> [32+umagic(32,c).s-1])))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpTrunc64to32)
- v0 := b.NewValue0(v.Pos, OpRsh64Ux64, config.fe.TypeUInt64())
- v1 := b.NewValue0(v.Pos, OpMul64, config.fe.TypeUInt64())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMul64, fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = int64(1<<31 + umagic(32, c).m/2)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 32 + umagic(32, c).s - 1
v0.AddArg(v4)
v.AddArg(v0)
}
// match: (Div32u x (Const32 [c]))
// cond: umagicOK(32, c) && config.RegSize == 8 && c&1 == 0
- // result: (Trunc64to32 (Rsh64Ux64 <config.fe.TypeUInt64()> (Mul64 <config.fe.TypeUInt64()> (Const64 <config.fe.TypeUInt64()> [int64(1<<31+(umagic(32,c).m+1)/2)]) (Rsh64Ux64 <config.fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <config.fe.TypeUInt64()> [1]))) (Const64 <config.fe.TypeUInt64()> [32+umagic(32,c).s-2])))
+ // result: (Trunc64to32 (Rsh64Ux64 <fe.TypeUInt64()> (Mul64 <fe.TypeUInt64()> (Const64 <fe.TypeUInt64()> [int64(1<<31+(umagic(32,c).m+1)/2)]) (Rsh64Ux64 <fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <fe.TypeUInt64()> [1]))) (Const64 <fe.TypeUInt64()> [32+umagic(32,c).s-2])))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpTrunc64to32)
- v0 := b.NewValue0(v.Pos, OpRsh64Ux64, config.fe.TypeUInt64())
- v1 := b.NewValue0(v.Pos, OpMul64, config.fe.TypeUInt64())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpMul64, fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = int64(1<<31 + (umagic(32, c).m+1)/2)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpRsh64Ux64, config.fe.TypeUInt64())
- v4 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpRsh64Ux64, fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v4.AddArg(x)
v3.AddArg(v4)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 1
v3.AddArg(v5)
v1.AddArg(v3)
v0.AddArg(v1)
- v6 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v6.AuxInt = 32 + umagic(32, c).s - 2
v0.AddArg(v6)
v.AddArg(v0)
}
// match: (Div32u x (Const32 [c]))
// cond: umagicOK(32, c) && config.RegSize == 8
- // result: (Trunc64to32 (Rsh64Ux64 <config.fe.TypeUInt64()> (Avg64u (Lsh64x64 <config.fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <config.fe.TypeUInt64()> [32])) (Mul64 <config.fe.TypeUInt64()> (Const64 <config.fe.TypeUInt32()> [int64(umagic(32,c).m)]) (ZeroExt32to64 x))) (Const64 <config.fe.TypeUInt64()> [32+umagic(32,c).s-1])))
+ // result: (Trunc64to32 (Rsh64Ux64 <fe.TypeUInt64()> (Avg64u (Lsh64x64 <fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <fe.TypeUInt64()> [32])) (Mul64 <fe.TypeUInt64()> (Const64 <fe.TypeUInt32()> [int64(umagic(32,c).m)]) (ZeroExt32to64 x))) (Const64 <fe.TypeUInt64()> [32+umagic(32,c).s-1])))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpTrunc64to32)
- v0 := b.NewValue0(v.Pos, OpRsh64Ux64, config.fe.TypeUInt64())
- v1 := b.NewValue0(v.Pos, OpAvg64u, config.fe.TypeUInt64())
- v2 := b.NewValue0(v.Pos, OpLsh64x64, config.fe.TypeUInt64())
- v3 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpAvg64u, fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpLsh64x64, fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v3.AddArg(x)
v2.AddArg(v3)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 32
v2.AddArg(v4)
v1.AddArg(v2)
- v5 := b.NewValue0(v.Pos, OpMul64, config.fe.TypeUInt64())
- v6 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt32())
+ v5 := b.NewValue0(v.Pos, OpMul64, fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt32())
v6.AuxInt = int64(umagic(32, c).m)
v5.AddArg(v6)
- v7 := b.NewValue0(v.Pos, OpZeroExt32to64, config.fe.TypeUInt64())
+ v7 := b.NewValue0(v.Pos, OpZeroExt32to64, fe.TypeUInt64())
v7.AddArg(x)
v5.AddArg(v7)
v1.AddArg(v5)
v0.AddArg(v1)
- v8 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v8 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v8.AuxInt = 32 + umagic(32, c).s - 1
v0.AddArg(v8)
v.AddArg(v0)
}
return false
}
-func rewriteValuegeneric_OpDiv64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpDiv64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div64 (Const64 [c]) (Const64 [d]))
// cond: d != 0
// result: (Const64 [c/d])
}
// match: (Div64 <t> x (Const64 [-1<<63]))
// cond:
- // result: (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <config.fe.TypeUInt64()> [63]))
+ // result: (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <fe.TypeUInt64()> [63]))
for {
t := v.Type
x := v.Args[0]
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 63
v.AddArg(v2)
return true
}
// match: (Div64 <t> n (Const64 [c]))
// cond: isPowerOfTwo(c)
- // result: (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <config.fe.TypeUInt64()> [63])) (Const64 <config.fe.TypeUInt64()> [64-log2(c)]))) (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ // result: (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <fe.TypeUInt64()> [63])) (Const64 <fe.TypeUInt64()> [64-log2(c)]))) (Const64 <fe.TypeUInt64()> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
v1 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
v2 := b.NewValue0(v.Pos, OpRsh64x64, t)
v2.AddArg(n)
- v3 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v3.AuxInt = 63
v2.AddArg(v3)
v1.AddArg(v2)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 64 - log2(c)
v1.AddArg(v4)
v0.AddArg(v1)
v.AddArg(v0)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = log2(c)
v.AddArg(v5)
return true
}
// match: (Div64 <t> x (Const64 [c]))
// cond: smagicOK(64,c) && smagic(64,c).m&1 == 0
- // result: (Sub64 <t> (Rsh64x64 <t> (Hmul64 <t> (Const64 <config.fe.TypeUInt64()> [int64(smagic(64,c).m/2)]) x) (Const64 <config.fe.TypeUInt64()> [smagic(64,c).s-1])) (Rsh64x64 <t> x (Const64 <config.fe.TypeUInt64()> [63])))
+ // result: (Sub64 <t> (Rsh64x64 <t> (Hmul64 <t> (Const64 <fe.TypeUInt64()> [int64(smagic(64,c).m/2)]) x) (Const64 <fe.TypeUInt64()> [smagic(64,c).s-1])) (Rsh64x64 <t> x (Const64 <fe.TypeUInt64()> [63])))
for {
t := v.Type
x := v.Args[0]
v.Type = t
v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
v1 := b.NewValue0(v.Pos, OpHmul64, t)
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = int64(smagic(64, c).m / 2)
v1.AddArg(v2)
v1.AddArg(x)
v0.AddArg(v1)
- v3 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v3.AuxInt = smagic(64, c).s - 1
v0.AddArg(v3)
v.AddArg(v0)
v4 := b.NewValue0(v.Pos, OpRsh64x64, t)
v4.AddArg(x)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = 63
v4.AddArg(v5)
v.AddArg(v4)
}
// match: (Div64 <t> x (Const64 [c]))
// cond: smagicOK(64,c) && smagic(64,c).m&1 != 0
- // result: (Sub64 <t> (Rsh64x64 <t> (Add64 <t> (Hmul64 <t> (Const64 <config.fe.TypeUInt64()> [int64(smagic(64,c).m)]) x) x) (Const64 <config.fe.TypeUInt64()> [smagic(64,c).s])) (Rsh64x64 <t> x (Const64 <config.fe.TypeUInt64()> [63])))
+ // result: (Sub64 <t> (Rsh64x64 <t> (Add64 <t> (Hmul64 <t> (Const64 <fe.TypeUInt64()> [int64(smagic(64,c).m)]) x) x) (Const64 <fe.TypeUInt64()> [smagic(64,c).s])) (Rsh64x64 <t> x (Const64 <fe.TypeUInt64()> [63])))
for {
t := v.Type
x := v.Args[0]
v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
v1 := b.NewValue0(v.Pos, OpAdd64, t)
v2 := b.NewValue0(v.Pos, OpHmul64, t)
- v3 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v3.AuxInt = int64(smagic(64, c).m)
v2.AddArg(v3)
v2.AddArg(x)
v1.AddArg(v2)
v1.AddArg(x)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = smagic(64, c).s
v0.AddArg(v4)
v.AddArg(v0)
v5 := b.NewValue0(v.Pos, OpRsh64x64, t)
v5.AddArg(x)
- v6 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v6 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v6.AuxInt = 63
v5.AddArg(v6)
v.AddArg(v5)
}
return false
}
-func rewriteValuegeneric_OpDiv64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpDiv64F(v *Value) bool {
// match: (Div64F (Const64F [c]) (Const64F [d]))
// cond:
// result: (Const64F [f2i(i2f(c) / i2f(d))])
}
return false
}
-func rewriteValuegeneric_OpDiv64u(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpDiv64u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div64u (Const64 [c]) (Const64 [d]))
// cond: d != 0
// result: (Const64 [int64(uint64(c)/uint64(d))])
}
// match: (Div64u n (Const64 [c]))
// cond: isPowerOfTwo(c)
- // result: (Rsh64Ux64 n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ // result: (Rsh64Ux64 n (Const64 <fe.TypeUInt64()> [log2(c)]))
for {
n := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpRsh64Ux64)
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Div64u x (Const64 [c]))
// cond: umagicOK(64, c) && config.RegSize == 8 && umagic(64,c).m&1 == 0
- // result: (Rsh64Ux64 <config.fe.TypeUInt64()> (Hmul64u <config.fe.TypeUInt64()> (Const64 <config.fe.TypeUInt64()> [int64(1<<63+umagic(64,c).m/2)]) x) (Const64 <config.fe.TypeUInt64()> [umagic(64,c).s-1]))
+ // result: (Rsh64Ux64 <fe.TypeUInt64()> (Hmul64u <fe.TypeUInt64()> (Const64 <fe.TypeUInt64()> [int64(1<<63+umagic(64,c).m/2)]) x) (Const64 <fe.TypeUInt64()> [umagic(64,c).s-1]))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpRsh64Ux64)
- v.Type = config.fe.TypeUInt64()
- v0 := b.NewValue0(v.Pos, OpHmul64u, config.fe.TypeUInt64())
- v1 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v.Type = fe.TypeUInt64()
+ v0 := b.NewValue0(v.Pos, OpHmul64u, fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v1.AuxInt = int64(1<<63 + umagic(64, c).m/2)
v0.AddArg(v1)
v0.AddArg(x)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = umagic(64, c).s - 1
v.AddArg(v2)
return true
}
// match: (Div64u x (Const64 [c]))
// cond: umagicOK(64, c) && config.RegSize == 8 && c&1 == 0
- // result: (Rsh64Ux64 <config.fe.TypeUInt64()> (Hmul64u <config.fe.TypeUInt64()> (Const64 <config.fe.TypeUInt64()> [int64(1<<63+(umagic(64,c).m+1)/2)]) (Rsh64Ux64 <config.fe.TypeUInt64()> x (Const64 <config.fe.TypeUInt64()> [1]))) (Const64 <config.fe.TypeUInt64()> [umagic(64,c).s-2]))
+ // result: (Rsh64Ux64 <fe.TypeUInt64()> (Hmul64u <fe.TypeUInt64()> (Const64 <fe.TypeUInt64()> [int64(1<<63+(umagic(64,c).m+1)/2)]) (Rsh64Ux64 <fe.TypeUInt64()> x (Const64 <fe.TypeUInt64()> [1]))) (Const64 <fe.TypeUInt64()> [umagic(64,c).s-2]))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpRsh64Ux64)
- v.Type = config.fe.TypeUInt64()
- v0 := b.NewValue0(v.Pos, OpHmul64u, config.fe.TypeUInt64())
- v1 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v.Type = fe.TypeUInt64()
+ v0 := b.NewValue0(v.Pos, OpHmul64u, fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v1.AuxInt = int64(1<<63 + (umagic(64, c).m+1)/2)
v0.AddArg(v1)
- v2 := b.NewValue0(v.Pos, OpRsh64Ux64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, fe.TypeUInt64())
v2.AddArg(x)
- v3 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v3.AuxInt = 1
v2.AddArg(v3)
v0.AddArg(v2)
v.AddArg(v0)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = umagic(64, c).s - 2
v.AddArg(v4)
return true
}
// match: (Div64u x (Const64 [c]))
// cond: umagicOK(64, c) && config.RegSize == 8
- // result: (Rsh64Ux64 <config.fe.TypeUInt64()> (Avg64u x (Hmul64u <config.fe.TypeUInt64()> (Const64 <config.fe.TypeUInt64()> [int64(umagic(64,c).m)]) x)) (Const64 <config.fe.TypeUInt64()> [umagic(64,c).s-1]))
+ // result: (Rsh64Ux64 <fe.TypeUInt64()> (Avg64u x (Hmul64u <fe.TypeUInt64()> (Const64 <fe.TypeUInt64()> [int64(umagic(64,c).m)]) x)) (Const64 <fe.TypeUInt64()> [umagic(64,c).s-1]))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpRsh64Ux64)
- v.Type = config.fe.TypeUInt64()
- v0 := b.NewValue0(v.Pos, OpAvg64u, config.fe.TypeUInt64())
+ v.Type = fe.TypeUInt64()
+ v0 := b.NewValue0(v.Pos, OpAvg64u, fe.TypeUInt64())
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpHmul64u, config.fe.TypeUInt64())
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpHmul64u, fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = int64(umagic(64, c).m)
v1.AddArg(v2)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v3 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v3.AuxInt = umagic(64, c).s - 1
v.AddArg(v3)
return true
}
return false
}
-func rewriteValuegeneric_OpDiv8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpDiv8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8 (Const8 [c]) (Const8 [d]))
// cond: d != 0
// result: (Const8 [int64(int8(c)/int8(d))])
}
// match: (Div8 <t> x (Const8 [-1<<7 ]))
// cond:
- // result: (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <config.fe.TypeUInt64()> [7 ]))
+ // result: (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <fe.TypeUInt64()> [7 ]))
for {
t := v.Type
x := v.Args[0]
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v2 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v2.AuxInt = 7
v.AddArg(v2)
return true
}
// match: (Div8 <t> n (Const8 [c]))
// cond: isPowerOfTwo(c)
- // result: (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <config.fe.TypeUInt64()> [ 7])) (Const64 <config.fe.TypeUInt64()> [ 8-log2(c)]))) (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ // result: (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <fe.TypeUInt64()> [ 7])) (Const64 <fe.TypeUInt64()> [ 8-log2(c)]))) (Const64 <fe.TypeUInt64()> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
v1 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
v2 := b.NewValue0(v.Pos, OpRsh8x64, t)
v2.AddArg(n)
- v3 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v3 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v3.AuxInt = 7
v2.AddArg(v3)
v1.AddArg(v2)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 8 - log2(c)
v1.AddArg(v4)
v0.AddArg(v1)
v.AddArg(v0)
- v5 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v5 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v5.AuxInt = log2(c)
v.AddArg(v5)
return true
}
// match: (Div8 <t> x (Const8 [c]))
// cond: smagicOK(8,c)
- // result: (Sub8 <t> (Rsh32x64 <t> (Mul32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [int64(smagic(8,c).m)]) (SignExt8to32 x)) (Const64 <config.fe.TypeUInt64()> [8+smagic(8,c).s])) (Rsh32x64 <t> (SignExt8to32 x) (Const64 <config.fe.TypeUInt64()> [31])))
+ // result: (Sub8 <t> (Rsh32x64 <t> (Mul32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [int64(smagic(8,c).m)]) (SignExt8to32 x)) (Const64 <fe.TypeUInt64()> [8+smagic(8,c).s])) (Rsh32x64 <t> (SignExt8to32 x) (Const64 <fe.TypeUInt64()> [31])))
for {
t := v.Type
x := v.Args[0]
v.reset(OpSub8)
v.Type = t
v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
- v1 := b.NewValue0(v.Pos, OpMul32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMul32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v2.AuxInt = int64(smagic(8, c).m)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v3 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 8 + smagic(8, c).s
v0.AddArg(v4)
v.AddArg(v0)
v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
- v6 := b.NewValue0(v.Pos, OpSignExt8to32, config.fe.TypeInt32())
+ v6 := b.NewValue0(v.Pos, OpSignExt8to32, fe.TypeInt32())
v6.AddArg(x)
v5.AddArg(v6)
- v7 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v7 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v7.AuxInt = 31
v5.AddArg(v7)
v.AddArg(v5)
}
return false
}
-func rewriteValuegeneric_OpDiv8u(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpDiv8u(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Div8u (Const8 [c]) (Const8 [d]))
// cond: d != 0
// result: (Const8 [int64(int8(uint8(c)/uint8(d)))])
}
// match: (Div8u n (Const8 [c]))
// cond: isPowerOfTwo(c&0xff)
- // result: (Rsh8Ux64 n (Const64 <config.fe.TypeUInt64()> [log2(c&0xff)]))
+ // result: (Rsh8Ux64 n (Const64 <fe.TypeUInt64()> [log2(c&0xff)]))
for {
n := v.Args[0]
v_1 := v.Args[1]
}
v.reset(OpRsh8Ux64)
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = log2(c & 0xff)
v.AddArg(v0)
return true
}
// match: (Div8u x (Const8 [c]))
// cond: umagicOK(8, c)
- // result: (Trunc32to8 (Rsh32Ux64 <config.fe.TypeUInt32()> (Mul32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [int64(1<<8+umagic(8,c).m)]) (ZeroExt8to32 x)) (Const64 <config.fe.TypeUInt64()> [8+umagic(8,c).s])))
+ // result: (Trunc32to8 (Rsh32Ux64 <fe.TypeUInt32()> (Mul32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [int64(1<<8+umagic(8,c).m)]) (ZeroExt8to32 x)) (Const64 <fe.TypeUInt64()> [8+umagic(8,c).s])))
for {
x := v.Args[0]
v_1 := v.Args[1]
break
}
v.reset(OpTrunc32to8)
- v0 := b.NewValue0(v.Pos, OpRsh32Ux64, config.fe.TypeUInt32())
- v1 := b.NewValue0(v.Pos, OpMul32, config.fe.TypeUInt32())
- v2 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, fe.TypeUInt32())
+ v1 := b.NewValue0(v.Pos, OpMul32, fe.TypeUInt32())
+ v2 := b.NewValue0(v.Pos, OpConst32, fe.TypeUInt32())
v2.AuxInt = int64(1<<8 + umagic(8, c).m)
v1.AddArg(v2)
- v3 := b.NewValue0(v.Pos, OpZeroExt8to32, config.fe.TypeUInt32())
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, fe.TypeUInt32())
v3.AddArg(x)
v1.AddArg(v3)
v0.AddArg(v1)
- v4 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v4 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v4.AuxInt = 8 + umagic(8, c).s
v0.AddArg(v4)
v.AddArg(v0)
}
return false
}
-func rewriteValuegeneric_OpEq16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpEq16(v *Value) bool {
b := v.Block
_ = b
// match: (Eq16 x x)
}
return false
}
-func rewriteValuegeneric_OpEq32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpEq32(v *Value) bool {
b := v.Block
_ = b
// match: (Eq32 x x)
}
return false
}
-func rewriteValuegeneric_OpEq64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpEq64(v *Value) bool {
b := v.Block
_ = b
// match: (Eq64 x x)
}
return false
}
-func rewriteValuegeneric_OpEq8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpEq8(v *Value) bool {
b := v.Block
_ = b
// match: (Eq8 x x)
}
return false
}
-func rewriteValuegeneric_OpEqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpEqB(v *Value) bool {
// match: (EqB (ConstBool [c]) (ConstBool [d]))
// cond:
// result: (ConstBool [b2i(c == d)])
}
return false
}
-func rewriteValuegeneric_OpEqInter(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpEqInter(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (EqInter x y)
// cond:
// result: (EqPtr (ITab x) (ITab y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpEqPtr)
- v0 := b.NewValue0(v.Pos, OpITab, config.fe.TypeBytePtr())
+ v0 := b.NewValue0(v.Pos, OpITab, fe.TypeBytePtr())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpITab, config.fe.TypeBytePtr())
+ v1 := b.NewValue0(v.Pos, OpITab, fe.TypeBytePtr())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValuegeneric_OpEqPtr(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpEqPtr(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (EqPtr p (ConstNil))
// cond:
// result: (Not (IsNonNil p))
break
}
v.reset(OpNot)
- v0 := b.NewValue0(v.Pos, OpIsNonNil, config.fe.TypeBool())
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, fe.TypeBool())
v0.AddArg(p)
v.AddArg(v0)
return true
}
p := v.Args[1]
v.reset(OpNot)
- v0 := b.NewValue0(v.Pos, OpIsNonNil, config.fe.TypeBool())
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, fe.TypeBool())
v0.AddArg(p)
v.AddArg(v0)
return true
}
return false
}
-func rewriteValuegeneric_OpEqSlice(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpEqSlice(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (EqSlice x y)
// cond:
// result: (EqPtr (SlicePtr x) (SlicePtr y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpEqPtr)
- v0 := b.NewValue0(v.Pos, OpSlicePtr, config.fe.TypeBytePtr())
+ v0 := b.NewValue0(v.Pos, OpSlicePtr, fe.TypeBytePtr())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSlicePtr, config.fe.TypeBytePtr())
+ v1 := b.NewValue0(v.Pos, OpSlicePtr, fe.TypeBytePtr())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValuegeneric_OpGeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGeq16(v *Value) bool {
// match: (Geq16 (Const16 [c]) (Const16 [d]))
// cond:
// result: (ConstBool [b2i(c >= d)])
}
return false
}
-func rewriteValuegeneric_OpGeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGeq16U(v *Value) bool {
// match: (Geq16U (Const16 [c]) (Const16 [d]))
// cond:
// result: (ConstBool [b2i(uint16(c) >= uint16(d))])
}
return false
}
-func rewriteValuegeneric_OpGeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGeq32(v *Value) bool {
// match: (Geq32 (Const32 [c]) (Const32 [d]))
// cond:
// result: (ConstBool [b2i(c >= d)])
}
return false
}
-func rewriteValuegeneric_OpGeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGeq32U(v *Value) bool {
// match: (Geq32U (Const32 [c]) (Const32 [d]))
// cond:
// result: (ConstBool [b2i(uint32(c) >= uint32(d))])
}
return false
}
-func rewriteValuegeneric_OpGeq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGeq64(v *Value) bool {
// match: (Geq64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (ConstBool [b2i(c >= d)])
}
return false
}
-func rewriteValuegeneric_OpGeq64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGeq64U(v *Value) bool {
// match: (Geq64U (Const64 [c]) (Const64 [d]))
// cond:
// result: (ConstBool [b2i(uint64(c) >= uint64(d))])
}
return false
}
-func rewriteValuegeneric_OpGeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGeq8(v *Value) bool {
// match: (Geq8 (Const8 [c]) (Const8 [d]))
// cond:
// result: (ConstBool [b2i(c >= d)])
}
return false
}
-func rewriteValuegeneric_OpGeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGeq8U(v *Value) bool {
// match: (Geq8U (Const8 [c]) (Const8 [d]))
// cond:
// result: (ConstBool [b2i(uint8(c) >= uint8(d))])
}
return false
}
-func rewriteValuegeneric_OpGreater16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGreater16(v *Value) bool {
// match: (Greater16 (Const16 [c]) (Const16 [d]))
// cond:
// result: (ConstBool [b2i(c > d)])
}
return false
}
-func rewriteValuegeneric_OpGreater16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGreater16U(v *Value) bool {
// match: (Greater16U (Const16 [c]) (Const16 [d]))
// cond:
// result: (ConstBool [b2i(uint16(c) > uint16(d))])
}
return false
}
-func rewriteValuegeneric_OpGreater32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGreater32(v *Value) bool {
// match: (Greater32 (Const32 [c]) (Const32 [d]))
// cond:
// result: (ConstBool [b2i(c > d)])
}
return false
}
-func rewriteValuegeneric_OpGreater32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGreater32U(v *Value) bool {
// match: (Greater32U (Const32 [c]) (Const32 [d]))
// cond:
// result: (ConstBool [b2i(uint32(c) > uint32(d))])
}
return false
}
-func rewriteValuegeneric_OpGreater64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGreater64(v *Value) bool {
// match: (Greater64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (ConstBool [b2i(c > d)])
}
return false
}
-func rewriteValuegeneric_OpGreater64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGreater64U(v *Value) bool {
// match: (Greater64U (Const64 [c]) (Const64 [d]))
// cond:
// result: (ConstBool [b2i(uint64(c) > uint64(d))])
}
return false
}
-func rewriteValuegeneric_OpGreater8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGreater8(v *Value) bool {
// match: (Greater8 (Const8 [c]) (Const8 [d]))
// cond:
// result: (ConstBool [b2i(c > d)])
}
return false
}
-func rewriteValuegeneric_OpGreater8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpGreater8U(v *Value) bool {
// match: (Greater8U (Const8 [c]) (Const8 [d]))
// cond:
// result: (ConstBool [b2i(uint8(c) > uint8(d))])
}
return false
}
-func rewriteValuegeneric_OpIMake(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpIMake(v *Value) bool {
// match: (IMake typ (StructMake1 val))
// cond:
// result: (IMake typ val)
}
return false
}
-func rewriteValuegeneric_OpInterCall(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpInterCall(v *Value) bool {
// match: (InterCall [argsize] (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem)
// cond: devirt(v, itab, off) != nil
// result: (StaticCall [argsize] {devirt(v, itab, off)} mem)
}
return false
}
-func rewriteValuegeneric_OpIsInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpIsInBounds(v *Value) bool {
// match: (IsInBounds (ZeroExt8to32 _) (Const32 [c]))
// cond: (1 << 8) <= c
// result: (ConstBool [1])
}
return false
}
-func rewriteValuegeneric_OpIsNonNil(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpIsNonNil(v *Value) bool {
// match: (IsNonNil (ConstNil))
// cond:
// result: (ConstBool [0])
}
return false
}
-func rewriteValuegeneric_OpIsSliceInBounds(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpIsSliceInBounds(v *Value) bool {
// match: (IsSliceInBounds x x)
// cond:
// result: (ConstBool [1])
}
return false
}
-func rewriteValuegeneric_OpLeq16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLeq16(v *Value) bool {
// match: (Leq16 (Const16 [c]) (Const16 [d]))
// cond:
// result: (ConstBool [b2i(c <= d)])
}
return false
}
-func rewriteValuegeneric_OpLeq16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLeq16U(v *Value) bool {
// match: (Leq16U (Const16 [c]) (Const16 [d]))
// cond:
// result: (ConstBool [b2i(uint16(c) <= uint16(d))])
}
return false
}
-func rewriteValuegeneric_OpLeq32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLeq32(v *Value) bool {
// match: (Leq32 (Const32 [c]) (Const32 [d]))
// cond:
// result: (ConstBool [b2i(c <= d)])
}
return false
}
-func rewriteValuegeneric_OpLeq32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLeq32U(v *Value) bool {
// match: (Leq32U (Const32 [c]) (Const32 [d]))
// cond:
// result: (ConstBool [b2i(uint32(c) <= uint32(d))])
}
return false
}
-func rewriteValuegeneric_OpLeq64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLeq64(v *Value) bool {
// match: (Leq64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (ConstBool [b2i(c <= d)])
}
return false
}
-func rewriteValuegeneric_OpLeq64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLeq64U(v *Value) bool {
// match: (Leq64U (Const64 [c]) (Const64 [d]))
// cond:
// result: (ConstBool [b2i(uint64(c) <= uint64(d))])
}
return false
}
-func rewriteValuegeneric_OpLeq8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLeq8(v *Value) bool {
// match: (Leq8 (Const8 [c]) (Const8 [d]))
// cond:
// result: (ConstBool [b2i(c <= d)])
}
return false
}
-func rewriteValuegeneric_OpLeq8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLeq8U(v *Value) bool {
// match: (Leq8U (Const8 [c]) (Const8 [d]))
// cond:
// result: (ConstBool [b2i(uint8(c) <= uint8(d))])
}
return false
}
-func rewriteValuegeneric_OpLess16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLess16(v *Value) bool {
// match: (Less16 (Const16 [c]) (Const16 [d]))
// cond:
// result: (ConstBool [b2i(c < d)])
}
return false
}
-func rewriteValuegeneric_OpLess16U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLess16U(v *Value) bool {
// match: (Less16U (Const16 [c]) (Const16 [d]))
// cond:
// result: (ConstBool [b2i(uint16(c) < uint16(d))])
}
return false
}
-func rewriteValuegeneric_OpLess32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLess32(v *Value) bool {
// match: (Less32 (Const32 [c]) (Const32 [d]))
// cond:
// result: (ConstBool [b2i(c < d)])
}
return false
}
-func rewriteValuegeneric_OpLess32U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLess32U(v *Value) bool {
// match: (Less32U (Const32 [c]) (Const32 [d]))
// cond:
// result: (ConstBool [b2i(uint32(c) < uint32(d))])
}
return false
}
-func rewriteValuegeneric_OpLess64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLess64(v *Value) bool {
// match: (Less64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (ConstBool [b2i(c < d)])
}
return false
}
-func rewriteValuegeneric_OpLess64U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLess64U(v *Value) bool {
// match: (Less64U (Const64 [c]) (Const64 [d]))
// cond:
// result: (ConstBool [b2i(uint64(c) < uint64(d))])
}
return false
}
-func rewriteValuegeneric_OpLess8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLess8(v *Value) bool {
// match: (Less8 (Const8 [c]) (Const8 [d]))
// cond:
// result: (ConstBool [b2i(c < d)])
}
return false
}
-func rewriteValuegeneric_OpLess8U(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpLess8U(v *Value) bool {
// match: (Less8U (Const8 [c]) (Const8 [d]))
// cond:
// result: (ConstBool [b2i(uint8(c) < uint8(d))])
}
return false
}
-func rewriteValuegeneric_OpLoad(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLoad(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Load <t1> p1 (Store {t2} p2 x _))
// cond: isSamePtr(p1,p2) && t1.Compare(x.Type)==CMPeq && t1.Size() == t2.(Type).Size()
// result: x
return true
}
// match: (Load <t> _ _)
- // cond: t.IsStruct() && t.NumFields() == 0 && config.fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)
// result: (StructMake0)
for {
t := v.Type
- if !(t.IsStruct() && t.NumFields() == 0 && config.fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) {
break
}
v.reset(OpStructMake0)
return true
}
// match: (Load <t> ptr mem)
- // cond: t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)
// result: (StructMake1 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) {
break
}
v.reset(OpStructMake1)
return true
}
// match: (Load <t> ptr mem)
- // cond: t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)
// result: (StructMake2 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) {
break
}
v.reset(OpStructMake2)
return true
}
// match: (Load <t> ptr mem)
- // cond: t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)
// result: (StructMake3 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem) (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) {
break
}
v.reset(OpStructMake3)
return true
}
// match: (Load <t> ptr mem)
- // cond: t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)
+ // cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)
// result: (StructMake4 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem) (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem) (Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)) {
+ if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) {
break
}
v.reset(OpStructMake4)
return true
}
// match: (Load <t> ptr mem)
- // cond: t.IsArray() && t.NumElem() == 1 && config.fe.CanSSA(t)
+ // cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)
// result: (ArrayMake1 (Load <t.ElemType()> ptr mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.IsArray() && t.NumElem() == 1 && config.fe.CanSSA(t)) {
+ if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) {
break
}
v.reset(OpArrayMake1)
}
return false
}
-func rewriteValuegeneric_OpLsh16x16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh16x16(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x16 <t> x (Const16 [c]))
}
return false
}
-func rewriteValuegeneric_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh16x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x32 <t> x (Const32 [c]))
}
return false
}
-func rewriteValuegeneric_OpLsh16x64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh16x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh16x64 (Const16 [c]) (Const64 [d]))
// cond:
// result: (Const16 [int64(int16(c) << uint64(d))])
}
// match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Lsh16x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ // result: (Lsh16x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh16Ux64 {
}
v.reset(OpLsh16x64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
return false
}
-func rewriteValuegeneric_OpLsh16x8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh16x8(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh16x8 <t> x (Const8 [c]))
}
return false
}
-func rewriteValuegeneric_OpLsh32x16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh32x16(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x16 <t> x (Const16 [c]))
}
return false
}
-func rewriteValuegeneric_OpLsh32x32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh32x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x32 <t> x (Const32 [c]))
}
return false
}
-func rewriteValuegeneric_OpLsh32x64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh32x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh32x64 (Const32 [c]) (Const64 [d]))
// cond:
// result: (Const32 [int64(int32(c) << uint64(d))])
}
// match: (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Lsh32x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ // result: (Lsh32x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh32Ux64 {
}
v.reset(OpLsh32x64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
return false
}
-func rewriteValuegeneric_OpLsh32x8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh32x8(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh32x8 <t> x (Const8 [c]))
}
return false
}
-func rewriteValuegeneric_OpLsh64x16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh64x16(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x16 <t> x (Const16 [c]))
}
return false
}
-func rewriteValuegeneric_OpLsh64x32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh64x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x32 <t> x (Const32 [c]))
}
return false
}
-func rewriteValuegeneric_OpLsh64x64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh64x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh64x64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (Const64 [c << uint64(d)])
}
// match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Lsh64x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ // result: (Lsh64x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh64Ux64 {
}
v.reset(OpLsh64x64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
return false
}
-func rewriteValuegeneric_OpLsh64x8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh64x8(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh64x8 <t> x (Const8 [c]))
}
return false
}
-func rewriteValuegeneric_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh8x16(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x16 <t> x (Const16 [c]))
}
return false
}
-func rewriteValuegeneric_OpLsh8x32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh8x32(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x32 <t> x (Const32 [c]))
}
return false
}
-func rewriteValuegeneric_OpLsh8x64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh8x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Lsh8x64 (Const8 [c]) (Const64 [d]))
// cond:
// result: (Const8 [int64(int8(c) << uint64(d))])
}
// match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Lsh8x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ // result: (Lsh8x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpRsh8Ux64 {
}
v.reset(OpLsh8x64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
return false
}
-func rewriteValuegeneric_OpLsh8x8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpLsh8x8(v *Value) bool {
b := v.Block
_ = b
// match: (Lsh8x8 <t> x (Const8 [c]))
}
return false
}
-func rewriteValuegeneric_OpMod16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpMod16(v *Value) bool {
b := v.Block
_ = b
// match: (Mod16 (Const16 [c]) (Const16 [d]))
}
return false
}
-func rewriteValuegeneric_OpMod16u(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpMod16u(v *Value) bool {
b := v.Block
_ = b
// match: (Mod16u (Const16 [c]) (Const16 [d]))
}
return false
}
-func rewriteValuegeneric_OpMod32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpMod32(v *Value) bool {
b := v.Block
_ = b
// match: (Mod32 (Const32 [c]) (Const32 [d]))
}
return false
}
-func rewriteValuegeneric_OpMod32u(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpMod32u(v *Value) bool {
b := v.Block
_ = b
// match: (Mod32u (Const32 [c]) (Const32 [d]))
}
return false
}
-func rewriteValuegeneric_OpMod64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpMod64(v *Value) bool {
b := v.Block
_ = b
// match: (Mod64 (Const64 [c]) (Const64 [d]))
}
return false
}
-func rewriteValuegeneric_OpMod64u(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpMod64u(v *Value) bool {
b := v.Block
_ = b
// match: (Mod64u (Const64 [c]) (Const64 [d]))
}
return false
}
-func rewriteValuegeneric_OpMod8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpMod8(v *Value) bool {
b := v.Block
_ = b
// match: (Mod8 (Const8 [c]) (Const8 [d]))
}
return false
}
-func rewriteValuegeneric_OpMod8u(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpMod8u(v *Value) bool {
b := v.Block
_ = b
// match: (Mod8u (Const8 [c]) (Const8 [d]))
}
return false
}
-func rewriteValuegeneric_OpMul16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpMul16(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mul16 (Const16 [c]) (Const16 [d]))
// cond:
// result: (Const16 [int64(int16(c*d))])
}
// match: (Mul16 <t> n (Const16 [c]))
// cond: isPowerOfTwo(c)
- // result: (Lsh16x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ // result: (Lsh16x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
v.reset(OpLsh16x64)
v.Type = t
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Mul16 <t> n (Const16 [c]))
// cond: t.IsSigned() && isPowerOfTwo(-c)
- // result: (Neg16 (Lsh16x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(-c)])))
+ // result: (Neg16 (Lsh16x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
for {
t := v.Type
n := v.Args[0]
v.reset(OpNeg16)
v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
v0.AddArg(n)
- v1 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v1.AuxInt = log2(-c)
v0.AddArg(v1)
v.AddArg(v0)
}
return false
}
-func rewriteValuegeneric_OpMul32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpMul32(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mul32 (Const32 [c]) (Const32 [d]))
// cond:
// result: (Const32 [int64(int32(c*d))])
}
// match: (Mul32 <t> n (Const32 [c]))
// cond: isPowerOfTwo(c)
- // result: (Lsh32x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ // result: (Lsh32x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
v.reset(OpLsh32x64)
v.Type = t
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Mul32 <t> n (Const32 [c]))
// cond: t.IsSigned() && isPowerOfTwo(-c)
- // result: (Neg32 (Lsh32x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(-c)])))
+ // result: (Neg32 (Lsh32x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
for {
t := v.Type
n := v.Args[0]
v.reset(OpNeg32)
v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
v0.AddArg(n)
- v1 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v1.AuxInt = log2(-c)
v0.AddArg(v1)
v.AddArg(v0)
}
return false
}
-func rewriteValuegeneric_OpMul32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpMul32F(v *Value) bool {
// match: (Mul32F (Const32F [c]) (Const32F [d]))
// cond:
// result: (Const32F [f2i(float64(i2f32(c) * i2f32(d)))])
}
return false
}
-func rewriteValuegeneric_OpMul64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpMul64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mul64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (Const64 [c*d])
}
// match: (Mul64 <t> n (Const64 [c]))
// cond: isPowerOfTwo(c)
- // result: (Lsh64x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ // result: (Lsh64x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
v.reset(OpLsh64x64)
v.Type = t
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Mul64 <t> n (Const64 [c]))
// cond: t.IsSigned() && isPowerOfTwo(-c)
- // result: (Neg64 (Lsh64x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(-c)])))
+ // result: (Neg64 (Lsh64x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
for {
t := v.Type
n := v.Args[0]
v.reset(OpNeg64)
v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
v0.AddArg(n)
- v1 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v1.AuxInt = log2(-c)
v0.AddArg(v1)
v.AddArg(v0)
}
return false
}
-func rewriteValuegeneric_OpMul64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpMul64F(v *Value) bool {
// match: (Mul64F (Const64F [c]) (Const64F [d]))
// cond:
// result: (Const64F [f2i(i2f(c) * i2f(d))])
}
return false
}
-func rewriteValuegeneric_OpMul8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpMul8(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Mul8 (Const8 [c]) (Const8 [d]))
// cond:
// result: (Const8 [int64(int8(c*d))])
}
// match: (Mul8 <t> n (Const8 [c]))
// cond: isPowerOfTwo(c)
- // result: (Lsh8x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(c)]))
+ // result: (Lsh8x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
for {
t := v.Type
n := v.Args[0]
v.reset(OpLsh8x64)
v.Type = t
v.AddArg(n)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = log2(c)
v.AddArg(v0)
return true
}
// match: (Mul8 <t> n (Const8 [c]))
// cond: t.IsSigned() && isPowerOfTwo(-c)
- // result: (Neg8 (Lsh8x64 <t> n (Const64 <config.fe.TypeUInt64()> [log2(-c)])))
+ // result: (Neg8 (Lsh8x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
for {
t := v.Type
n := v.Args[0]
v.reset(OpNeg8)
v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
v0.AddArg(n)
- v1 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v1 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v1.AuxInt = log2(-c)
v0.AddArg(v1)
v.AddArg(v0)
}
return false
}
-func rewriteValuegeneric_OpNeg16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpNeg16(v *Value) bool {
// match: (Neg16 (Const16 [c]))
// cond:
// result: (Const16 [int64(-int16(c))])
}
return false
}
-func rewriteValuegeneric_OpNeg32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpNeg32(v *Value) bool {
// match: (Neg32 (Const32 [c]))
// cond:
// result: (Const32 [int64(-int32(c))])
}
return false
}
-func rewriteValuegeneric_OpNeg32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpNeg32F(v *Value) bool {
// match: (Neg32F (Const32F [c]))
// cond: i2f(c) != 0
// result: (Const32F [f2i(-i2f(c))])
}
return false
}
-func rewriteValuegeneric_OpNeg64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpNeg64(v *Value) bool {
// match: (Neg64 (Const64 [c]))
// cond:
// result: (Const64 [-c])
}
return false
}
-func rewriteValuegeneric_OpNeg64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpNeg64F(v *Value) bool {
// match: (Neg64F (Const64F [c]))
// cond: i2f(c) != 0
// result: (Const64F [f2i(-i2f(c))])
}
return false
}
-func rewriteValuegeneric_OpNeg8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpNeg8(v *Value) bool {
// match: (Neg8 (Const8 [c]))
// cond:
// result: (Const8 [int64( -int8(c))])
}
return false
}
-func rewriteValuegeneric_OpNeq16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpNeq16(v *Value) bool {
b := v.Block
_ = b
// match: (Neq16 x x)
}
return false
}
-func rewriteValuegeneric_OpNeq32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpNeq32(v *Value) bool {
b := v.Block
_ = b
// match: (Neq32 x x)
}
return false
}
-func rewriteValuegeneric_OpNeq64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpNeq64(v *Value) bool {
b := v.Block
_ = b
// match: (Neq64 x x)
}
return false
}
-func rewriteValuegeneric_OpNeq8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpNeq8(v *Value) bool {
b := v.Block
_ = b
// match: (Neq8 x x)
}
return false
}
-func rewriteValuegeneric_OpNeqB(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpNeqB(v *Value) bool {
// match: (NeqB (ConstBool [c]) (ConstBool [d]))
// cond:
// result: (ConstBool [b2i(c != d)])
}
return false
}
-func rewriteValuegeneric_OpNeqInter(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpNeqInter(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (NeqInter x y)
// cond:
// result: (NeqPtr (ITab x) (ITab y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpNeqPtr)
- v0 := b.NewValue0(v.Pos, OpITab, config.fe.TypeBytePtr())
+ v0 := b.NewValue0(v.Pos, OpITab, fe.TypeBytePtr())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpITab, config.fe.TypeBytePtr())
+ v1 := b.NewValue0(v.Pos, OpITab, fe.TypeBytePtr())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValuegeneric_OpNeqPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpNeqPtr(v *Value) bool {
// match: (NeqPtr p (ConstNil))
// cond:
// result: (IsNonNil p)
}
return false
}
-func rewriteValuegeneric_OpNeqSlice(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpNeqSlice(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (NeqSlice x y)
// cond:
// result: (NeqPtr (SlicePtr x) (SlicePtr y))
x := v.Args[0]
y := v.Args[1]
v.reset(OpNeqPtr)
- v0 := b.NewValue0(v.Pos, OpSlicePtr, config.fe.TypeBytePtr())
+ v0 := b.NewValue0(v.Pos, OpSlicePtr, fe.TypeBytePtr())
v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpSlicePtr, config.fe.TypeBytePtr())
+ v1 := b.NewValue0(v.Pos, OpSlicePtr, fe.TypeBytePtr())
v1.AddArg(y)
v.AddArg(v1)
return true
}
}
-func rewriteValuegeneric_OpNilCheck(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpNilCheck(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (NilCheck (GetG mem) mem)
// cond:
// result: mem
return true
}
// match: (NilCheck (Load (OffPtr [c] (SP)) mem) mem)
- // cond: mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(config.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
+ // cond: mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
// result: (Invalid)
for {
v_0 := v.Args[0]
if mem != v.Args[1] {
break
}
- if !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(config.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")) {
+ if !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")) {
break
}
v.reset(OpInvalid)
return true
}
// match: (NilCheck (OffPtr (Load (OffPtr [c] (SP)) mem)) mem)
- // cond: mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(config.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
+ // cond: mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
// result: (Invalid)
for {
v_0 := v.Args[0]
if mem != v.Args[1] {
break
}
- if !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(config.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")) {
+ if !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")) {
break
}
v.reset(OpInvalid)
}
return false
}
-func rewriteValuegeneric_OpNot(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpNot(v *Value) bool {
// match: (Not (Eq64 x y))
// cond:
// result: (Neq64 x y)
}
return false
}
-func rewriteValuegeneric_OpOffPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpOffPtr(v *Value) bool {
// match: (OffPtr (OffPtr p [b]) [a])
// cond:
// result: (OffPtr p [a+b])
}
return false
}
-func rewriteValuegeneric_OpOr16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpOr16(v *Value) bool {
b := v.Block
_ = b
// match: (Or16 (Const16 [c]) (Const16 [d]))
}
return false
}
-func rewriteValuegeneric_OpOr32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpOr32(v *Value) bool {
b := v.Block
_ = b
// match: (Or32 (Const32 [c]) (Const32 [d]))
}
return false
}
-func rewriteValuegeneric_OpOr64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpOr64(v *Value) bool {
b := v.Block
_ = b
// match: (Or64 (Const64 [c]) (Const64 [d]))
}
return false
}
-func rewriteValuegeneric_OpOr8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpOr8(v *Value) bool {
b := v.Block
_ = b
// match: (Or8 (Const8 [c]) (Const8 [d]))
}
return false
}
-func rewriteValuegeneric_OpPhi(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpPhi(v *Value) bool {
// match: (Phi (Const8 [c]) (Const8 [c]))
// cond:
// result: (Const8 [c])
}
return false
}
-func rewriteValuegeneric_OpPtrIndex(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpPtrIndex(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (PtrIndex <t> ptr idx)
// cond: config.PtrSize == 4
- // result: (AddPtr ptr (Mul32 <config.fe.TypeInt()> idx (Const32 <config.fe.TypeInt()> [t.ElemType().Size()])))
+ // result: (AddPtr ptr (Mul32 <fe.TypeInt()> idx (Const32 <fe.TypeInt()> [t.ElemType().Size()])))
for {
t := v.Type
ptr := v.Args[0]
}
v.reset(OpAddPtr)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMul32, config.fe.TypeInt())
+ v0 := b.NewValue0(v.Pos, OpMul32, fe.TypeInt())
v0.AddArg(idx)
- v1 := b.NewValue0(v.Pos, OpConst32, config.fe.TypeInt())
+ v1 := b.NewValue0(v.Pos, OpConst32, fe.TypeInt())
v1.AuxInt = t.ElemType().Size()
v0.AddArg(v1)
v.AddArg(v0)
}
// match: (PtrIndex <t> ptr idx)
// cond: config.PtrSize == 8
- // result: (AddPtr ptr (Mul64 <config.fe.TypeInt()> idx (Const64 <config.fe.TypeInt()> [t.ElemType().Size()])))
+ // result: (AddPtr ptr (Mul64 <fe.TypeInt()> idx (Const64 <fe.TypeInt()> [t.ElemType().Size()])))
for {
t := v.Type
ptr := v.Args[0]
}
v.reset(OpAddPtr)
v.AddArg(ptr)
- v0 := b.NewValue0(v.Pos, OpMul64, config.fe.TypeInt())
+ v0 := b.NewValue0(v.Pos, OpMul64, fe.TypeInt())
v0.AddArg(idx)
- v1 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeInt())
+ v1 := b.NewValue0(v.Pos, OpConst64, fe.TypeInt())
v1.AuxInt = t.ElemType().Size()
v0.AddArg(v1)
v.AddArg(v0)
}
return false
}
-func rewriteValuegeneric_OpRound32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpRound32F(v *Value) bool {
// match: (Round32F x:(Const32F))
// cond:
// result: x
}
return false
}
-func rewriteValuegeneric_OpRound64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpRound64F(v *Value) bool {
// match: (Round64F x:(Const64F))
// cond:
// result: x
}
return false
}
-func rewriteValuegeneric_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh16Ux16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux16 <t> x (Const16 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh16Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux32 <t> x (Const32 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16Ux64 (Const16 [c]) (Const64 [d]))
// cond:
// result: (Const16 [int64(int16(uint16(c) >> uint64(d)))])
}
// match: (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Rsh16Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ // result: (Rsh16Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh16x64 {
}
v.reset(OpRsh16Ux64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
// match: (Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8]))
// cond:
- // result: (ZeroExt8to16 (Trunc16to8 <config.fe.TypeUInt8()> x))
+ // result: (ZeroExt8to16 (Trunc16to8 <fe.TypeUInt8()> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh16x64 {
break
}
v.reset(OpZeroExt8to16)
- v0 := b.NewValue0(v.Pos, OpTrunc16to8, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpTrunc16to8, fe.TypeUInt8())
v0.AddArg(x)
v.AddArg(v0)
return true
}
return false
}
-func rewriteValuegeneric_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh16Ux8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16Ux8 <t> x (Const8 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh16x16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x16 <t> x (Const16 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh16x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x32 <t> x (Const32 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh16x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh16x64 (Const16 [c]) (Const64 [d]))
// cond:
// result: (Const16 [int64(int16(c) >> uint64(d))])
}
// match: (Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8]))
// cond:
- // result: (SignExt8to16 (Trunc16to8 <config.fe.TypeInt8()> x))
+ // result: (SignExt8to16 (Trunc16to8 <fe.TypeInt8()> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh16x64 {
break
}
v.reset(OpSignExt8to16)
- v0 := b.NewValue0(v.Pos, OpTrunc16to8, config.fe.TypeInt8())
+ v0 := b.NewValue0(v.Pos, OpTrunc16to8, fe.TypeInt8())
v0.AddArg(x)
v.AddArg(v0)
return true
}
return false
}
-func rewriteValuegeneric_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh16x8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh16x8 <t> x (Const8 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh32Ux16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux16 <t> x (Const16 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh32Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux32 <t> x (Const32 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh32Ux64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32Ux64 (Const32 [c]) (Const64 [d]))
// cond:
// result: (Const32 [int64(int32(uint32(c) >> uint64(d)))])
}
// match: (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Rsh32Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ // result: (Rsh32Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x64 {
}
v.reset(OpRsh32Ux64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
// match: (Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24]))
// cond:
- // result: (ZeroExt8to32 (Trunc32to8 <config.fe.TypeUInt8()> x))
+ // result: (ZeroExt8to32 (Trunc32to8 <fe.TypeUInt8()> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x64 {
break
}
v.reset(OpZeroExt8to32)
- v0 := b.NewValue0(v.Pos, OpTrunc32to8, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpTrunc32to8, fe.TypeUInt8())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16]))
// cond:
- // result: (ZeroExt16to32 (Trunc32to16 <config.fe.TypeUInt16()> x))
+ // result: (ZeroExt16to32 (Trunc32to16 <fe.TypeUInt16()> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x64 {
break
}
v.reset(OpZeroExt16to32)
- v0 := b.NewValue0(v.Pos, OpTrunc32to16, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpTrunc32to16, fe.TypeUInt16())
v0.AddArg(x)
v.AddArg(v0)
return true
}
return false
}
-func rewriteValuegeneric_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh32Ux8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32Ux8 <t> x (Const8 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh32x16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x16 <t> x (Const16 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh32x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x32 <t> x (Const32 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh32x64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh32x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh32x64 (Const32 [c]) (Const64 [d]))
// cond:
// result: (Const32 [int64(int32(c) >> uint64(d))])
}
// match: (Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24]))
// cond:
- // result: (SignExt8to32 (Trunc32to8 <config.fe.TypeInt8()> x))
+ // result: (SignExt8to32 (Trunc32to8 <fe.TypeInt8()> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x64 {
break
}
v.reset(OpSignExt8to32)
- v0 := b.NewValue0(v.Pos, OpTrunc32to8, config.fe.TypeInt8())
+ v0 := b.NewValue0(v.Pos, OpTrunc32to8, fe.TypeInt8())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16]))
// cond:
- // result: (SignExt16to32 (Trunc32to16 <config.fe.TypeInt16()> x))
+ // result: (SignExt16to32 (Trunc32to16 <fe.TypeInt16()> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh32x64 {
break
}
v.reset(OpSignExt16to32)
- v0 := b.NewValue0(v.Pos, OpTrunc32to16, config.fe.TypeInt16())
+ v0 := b.NewValue0(v.Pos, OpTrunc32to16, fe.TypeInt16())
v0.AddArg(x)
v.AddArg(v0)
return true
}
return false
}
-func rewriteValuegeneric_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh32x8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh32x8 <t> x (Const8 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh64Ux16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh64Ux16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux16 <t> x (Const16 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh64Ux32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh64Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux32 <t> x (Const32 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh64Ux64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64Ux64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (Const64 [int64(uint64(c) >> uint64(d))])
}
// match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Rsh64Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ // result: (Rsh64Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
}
v.reset(OpRsh64Ux64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
// match: (Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56]))
// cond:
- // result: (ZeroExt8to64 (Trunc64to8 <config.fe.TypeUInt8()> x))
+ // result: (ZeroExt8to64 (Trunc64to8 <fe.TypeUInt8()> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
break
}
v.reset(OpZeroExt8to64)
- v0 := b.NewValue0(v.Pos, OpTrunc64to8, config.fe.TypeUInt8())
+ v0 := b.NewValue0(v.Pos, OpTrunc64to8, fe.TypeUInt8())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48]))
// cond:
- // result: (ZeroExt16to64 (Trunc64to16 <config.fe.TypeUInt16()> x))
+ // result: (ZeroExt16to64 (Trunc64to16 <fe.TypeUInt16()> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
break
}
v.reset(OpZeroExt16to64)
- v0 := b.NewValue0(v.Pos, OpTrunc64to16, config.fe.TypeUInt16())
+ v0 := b.NewValue0(v.Pos, OpTrunc64to16, fe.TypeUInt16())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32]))
// cond:
- // result: (ZeroExt32to64 (Trunc64to32 <config.fe.TypeUInt32()> x))
+ // result: (ZeroExt32to64 (Trunc64to32 <fe.TypeUInt32()> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
break
}
v.reset(OpZeroExt32to64)
- v0 := b.NewValue0(v.Pos, OpTrunc64to32, config.fe.TypeUInt32())
+ v0 := b.NewValue0(v.Pos, OpTrunc64to32, fe.TypeUInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
return false
}
-func rewriteValuegeneric_OpRsh64Ux8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh64Ux8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64Ux8 <t> x (Const8 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh64x16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh64x16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x16 <t> x (Const16 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh64x32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh64x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x32 <t> x (Const32 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh64x64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh64x64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh64x64 (Const64 [c]) (Const64 [d]))
// cond:
// result: (Const64 [c >> uint64(d)])
}
// match: (Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56]))
// cond:
- // result: (SignExt8to64 (Trunc64to8 <config.fe.TypeInt8()> x))
+ // result: (SignExt8to64 (Trunc64to8 <fe.TypeInt8()> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
break
}
v.reset(OpSignExt8to64)
- v0 := b.NewValue0(v.Pos, OpTrunc64to8, config.fe.TypeInt8())
+ v0 := b.NewValue0(v.Pos, OpTrunc64to8, fe.TypeInt8())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48]))
// cond:
- // result: (SignExt16to64 (Trunc64to16 <config.fe.TypeInt16()> x))
+ // result: (SignExt16to64 (Trunc64to16 <fe.TypeInt16()> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
break
}
v.reset(OpSignExt16to64)
- v0 := b.NewValue0(v.Pos, OpTrunc64to16, config.fe.TypeInt16())
+ v0 := b.NewValue0(v.Pos, OpTrunc64to16, fe.TypeInt16())
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32]))
// cond:
- // result: (SignExt32to64 (Trunc64to32 <config.fe.TypeInt32()> x))
+ // result: (SignExt32to64 (Trunc64to32 <fe.TypeInt32()> x))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh64x64 {
break
}
v.reset(OpSignExt32to64)
- v0 := b.NewValue0(v.Pos, OpTrunc64to32, config.fe.TypeInt32())
+ v0 := b.NewValue0(v.Pos, OpTrunc64to32, fe.TypeInt32())
v0.AddArg(x)
v.AddArg(v0)
return true
}
return false
}
-func rewriteValuegeneric_OpRsh64x8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh64x8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh64x8 <t> x (Const8 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh8Ux16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux16 <t> x (Const16 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh8Ux32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux32 <t> x (Const32 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Rsh8Ux64 (Const8 [c]) (Const64 [d]))
// cond:
// result: (Const8 [int64(int8(uint8(c) >> uint64(d)))])
}
// match: (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
- // result: (Rsh8Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
+ // result: (Rsh8Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
for {
v_0 := v.Args[0]
if v_0.Op != OpLsh8x64 {
}
v.reset(OpRsh8Ux64)
v.AddArg(x)
- v0 := b.NewValue0(v.Pos, OpConst64, config.fe.TypeUInt64())
+ v0 := b.NewValue0(v.Pos, OpConst64, fe.TypeUInt64())
v0.AuxInt = c1 - c2 + c3
v.AddArg(v0)
return true
}
return false
}
-func rewriteValuegeneric_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh8Ux8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8Ux8 <t> x (Const8 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh8x16(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x16 <t> x (Const16 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh8x32(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x32 <t> x (Const32 [c]))
}
return false
}
-func rewriteValuegeneric_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh8x64(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x64 (Const8 [c]) (Const64 [d]))
}
return false
}
-func rewriteValuegeneric_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpRsh8x8(v *Value) bool {
b := v.Block
_ = b
// match: (Rsh8x8 <t> x (Const8 [c]))
}
return false
}
-func rewriteValuegeneric_OpSignExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSignExt16to32(v *Value) bool {
// match: (SignExt16to32 (Const16 [c]))
// cond:
// result: (Const32 [int64( int16(c))])
}
return false
}
-func rewriteValuegeneric_OpSignExt16to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSignExt16to64(v *Value) bool {
// match: (SignExt16to64 (Const16 [c]))
// cond:
// result: (Const64 [int64( int16(c))])
}
return false
}
-func rewriteValuegeneric_OpSignExt32to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSignExt32to64(v *Value) bool {
// match: (SignExt32to64 (Const32 [c]))
// cond:
// result: (Const64 [int64( int32(c))])
}
return false
}
-func rewriteValuegeneric_OpSignExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSignExt8to16(v *Value) bool {
// match: (SignExt8to16 (Const8 [c]))
// cond:
// result: (Const16 [int64( int8(c))])
}
return false
}
-func rewriteValuegeneric_OpSignExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSignExt8to32(v *Value) bool {
// match: (SignExt8to32 (Const8 [c]))
// cond:
// result: (Const32 [int64( int8(c))])
}
return false
}
-func rewriteValuegeneric_OpSignExt8to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSignExt8to64(v *Value) bool {
// match: (SignExt8to64 (Const8 [c]))
// cond:
// result: (Const64 [int64( int8(c))])
}
return false
}
-func rewriteValuegeneric_OpSliceCap(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSliceCap(v *Value) bool {
// match: (SliceCap (SliceMake _ _ (Const64 <t> [c])))
// cond:
// result: (Const64 <t> [c])
}
return false
}
-func rewriteValuegeneric_OpSliceLen(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSliceLen(v *Value) bool {
// match: (SliceLen (SliceMake _ (Const64 <t> [c]) _))
// cond:
// result: (Const64 <t> [c])
}
return false
}
-func rewriteValuegeneric_OpSlicePtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSlicePtr(v *Value) bool {
// match: (SlicePtr (SliceMake (SlicePtr x) _ _))
// cond:
// result: (SlicePtr x)
}
return false
}
-func rewriteValuegeneric_OpSlicemask(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSlicemask(v *Value) bool {
// match: (Slicemask (Const32 [x]))
// cond: x > 0
// result: (Const32 [-1])
}
return false
}
-func rewriteValuegeneric_OpSqrt(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSqrt(v *Value) bool {
// match: (Sqrt (Const64F [c]))
// cond:
// result: (Const64F [f2i(math.Sqrt(i2f(c)))])
}
return false
}
-func rewriteValuegeneric_OpStore(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpStore(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (Store _ (StructMake0) mem)
// cond:
// result: mem
return true
}
// match: (Store {t} dst (Load src mem) mem)
- // cond: !config.fe.CanSSA(t.(Type))
+ // cond: !fe.CanSSA(t.(Type))
// result: (Move {t} [t.(Type).Size()] dst src mem)
for {
t := v.Aux
if mem != v.Args[2] {
break
}
- if !(!config.fe.CanSSA(t.(Type))) {
+ if !(!fe.CanSSA(t.(Type))) {
break
}
v.reset(OpMove)
return true
}
// match: (Store {t} dst (Load src mem) (VarDef {x} mem))
- // cond: !config.fe.CanSSA(t.(Type))
+ // cond: !fe.CanSSA(t.(Type))
// result: (Move {t} [t.(Type).Size()] dst src (VarDef {x} mem))
for {
t := v.Aux
if mem != v_2.Args[0] {
break
}
- if !(!config.fe.CanSSA(t.(Type))) {
+ if !(!fe.CanSSA(t.(Type))) {
break
}
v.reset(OpMove)
}
return false
}
-func rewriteValuegeneric_OpStringLen(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpStringLen(v *Value) bool {
// match: (StringLen (StringMake _ (Const64 <t> [c])))
// cond:
// result: (Const64 <t> [c])
}
return false
}
-func rewriteValuegeneric_OpStringPtr(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpStringPtr(v *Value) bool {
// match: (StringPtr (StringMake (Const64 <t> [c]) _))
// cond:
// result: (Const64 <t> [c])
}
return false
}
-func rewriteValuegeneric_OpStructSelect(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpStructSelect(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
// match: (StructSelect (StructMake1 x))
// cond:
// result: x
return true
}
// match: (StructSelect [i] x:(Load <t> ptr mem))
- // cond: !config.fe.CanSSA(t)
+ // cond: !fe.CanSSA(t)
// result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
for {
i := v.AuxInt
t := x.Type
ptr := x.Args[0]
mem := x.Args[1]
- if !(!config.fe.CanSSA(t)) {
+ if !(!fe.CanSSA(t)) {
break
}
b = x.Block
}
return false
}
-func rewriteValuegeneric_OpSub16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpSub16(v *Value) bool {
b := v.Block
_ = b
// match: (Sub16 (Const16 [c]) (Const16 [d]))
}
return false
}
-func rewriteValuegeneric_OpSub32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpSub32(v *Value) bool {
b := v.Block
_ = b
// match: (Sub32 (Const32 [c]) (Const32 [d]))
}
return false
}
-func rewriteValuegeneric_OpSub32F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSub32F(v *Value) bool {
// match: (Sub32F (Const32F [c]) (Const32F [d]))
// cond:
// result: (Const32F [f2i(float64(i2f32(c) - i2f32(d)))])
}
return false
}
-func rewriteValuegeneric_OpSub64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpSub64(v *Value) bool {
b := v.Block
_ = b
// match: (Sub64 (Const64 [c]) (Const64 [d]))
}
return false
}
-func rewriteValuegeneric_OpSub64F(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpSub64F(v *Value) bool {
// match: (Sub64F (Const64F [c]) (Const64F [d]))
// cond:
// result: (Const64F [f2i(i2f(c) - i2f(d))])
}
return false
}
-func rewriteValuegeneric_OpSub8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpSub8(v *Value) bool {
b := v.Block
_ = b
// match: (Sub8 (Const8 [c]) (Const8 [d]))
}
return false
}
-func rewriteValuegeneric_OpTrunc16to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpTrunc16to8(v *Value) bool {
// match: (Trunc16to8 (Const16 [c]))
// cond:
// result: (Const8 [int64(int8(c))])
}
return false
}
-func rewriteValuegeneric_OpTrunc32to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpTrunc32to16(v *Value) bool {
// match: (Trunc32to16 (Const32 [c]))
// cond:
// result: (Const16 [int64(int16(c))])
}
return false
}
-func rewriteValuegeneric_OpTrunc32to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpTrunc32to8(v *Value) bool {
// match: (Trunc32to8 (Const32 [c]))
// cond:
// result: (Const8 [int64(int8(c))])
}
return false
}
-func rewriteValuegeneric_OpTrunc64to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpTrunc64to16(v *Value) bool {
// match: (Trunc64to16 (Const64 [c]))
// cond:
// result: (Const16 [int64(int16(c))])
}
return false
}
-func rewriteValuegeneric_OpTrunc64to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpTrunc64to32(v *Value) bool {
// match: (Trunc64to32 (Const64 [c]))
// cond:
// result: (Const32 [int64(int32(c))])
}
return false
}
-func rewriteValuegeneric_OpTrunc64to8(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpTrunc64to8(v *Value) bool {
// match: (Trunc64to8 (Const64 [c]))
// cond:
// result: (Const8 [int64(int8(c))])
}
return false
}
-func rewriteValuegeneric_OpXor16(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpXor16(v *Value) bool {
b := v.Block
_ = b
// match: (Xor16 (Const16 [c]) (Const16 [d]))
}
return false
}
-func rewriteValuegeneric_OpXor32(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpXor32(v *Value) bool {
b := v.Block
_ = b
// match: (Xor32 (Const32 [c]) (Const32 [d]))
}
return false
}
-func rewriteValuegeneric_OpXor64(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpXor64(v *Value) bool {
b := v.Block
_ = b
// match: (Xor64 (Const64 [c]) (Const64 [d]))
}
return false
}
-func rewriteValuegeneric_OpXor8(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpXor8(v *Value) bool {
b := v.Block
_ = b
// match: (Xor8 (Const8 [c]) (Const8 [d]))
}
return false
}
-func rewriteValuegeneric_OpZero(v *Value, config *Config) bool {
+func rewriteValuegeneric_OpZero(v *Value) bool {
b := v.Block
_ = b
+ config := b.Func.Config
+ _ = config
// match: (Zero (Load (OffPtr [c] (SP)) mem) mem)
// cond: mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize
// result: mem
}
return false
}
-func rewriteValuegeneric_OpZeroExt16to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpZeroExt16to32(v *Value) bool {
// match: (ZeroExt16to32 (Const16 [c]))
// cond:
// result: (Const32 [int64(uint16(c))])
}
return false
}
-func rewriteValuegeneric_OpZeroExt16to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpZeroExt16to64(v *Value) bool {
// match: (ZeroExt16to64 (Const16 [c]))
// cond:
// result: (Const64 [int64(uint16(c))])
}
return false
}
-func rewriteValuegeneric_OpZeroExt32to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpZeroExt32to64(v *Value) bool {
// match: (ZeroExt32to64 (Const32 [c]))
// cond:
// result: (Const64 [int64(uint32(c))])
}
return false
}
-func rewriteValuegeneric_OpZeroExt8to16(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpZeroExt8to16(v *Value) bool {
// match: (ZeroExt8to16 (Const8 [c]))
// cond:
// result: (Const16 [int64( uint8(c))])
}
return false
}
-func rewriteValuegeneric_OpZeroExt8to32(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpZeroExt8to32(v *Value) bool {
// match: (ZeroExt8to32 (Const8 [c]))
// cond:
// result: (Const32 [int64( uint8(c))])
}
return false
}
-func rewriteValuegeneric_OpZeroExt8to64(v *Value, config *Config) bool {
- b := v.Block
- _ = b
+func rewriteValuegeneric_OpZeroExt8to64(v *Value) bool {
// match: (ZeroExt8to64 (Const8 [c]))
// cond:
// result: (Const64 [int64( uint8(c))])
}
return false
}
-func rewriteBlockgeneric(b *Block, config *Config) bool {
+func rewriteBlockgeneric(b *Block) bool {
+ config := b.Func.Config
+ _ = config
+ fe := config.fe
+ _ = fe
switch b.Kind {
case BlockIf:
// match: (If (Not cond) yes no)