(CSEL0 {arm64Negate(boolval.Op)} x flagArg(boolval))
// absorb shifts into ops
-(NEG x:(SLLconst [c] y)) && clobberIfDead(x) -> (NEGshiftLL [c] y)
-(NEG x:(SRLconst [c] y)) && clobberIfDead(x) -> (NEGshiftRL [c] y)
-(NEG x:(SRAconst [c] y)) && clobberIfDead(x) -> (NEGshiftRA [c] y)
-(MVN x:(SLLconst [c] y)) && clobberIfDead(x) -> (MVNshiftLL [c] y)
-(MVN x:(SRLconst [c] y)) && clobberIfDead(x) -> (MVNshiftRL [c] y)
-(MVN x:(SRAconst [c] y)) && clobberIfDead(x) -> (MVNshiftRA [c] y)
-(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ADDshiftLL x0 y [c])
-(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ADDshiftRL x0 y [c])
-(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ADDshiftRA x0 y [c])
-(SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (SUBshiftLL x0 y [c])
-(SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (SUBshiftRL x0 y [c])
-(SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (SUBshiftRA x0 y [c])
-(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ANDshiftLL x0 y [c])
-(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ANDshiftRL x0 y [c])
-(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ANDshiftRA x0 y [c])
-(OR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ORshiftLL x0 y [c]) // useful for combined load
-(OR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ORshiftRL x0 y [c])
-(OR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ORshiftRA x0 y [c])
-(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (XORshiftLL x0 y [c])
-(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (XORshiftRL x0 y [c])
-(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (XORshiftRA x0 y [c])
-(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (BICshiftLL x0 y [c])
-(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (BICshiftRL x0 y [c])
-(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (BICshiftRA x0 y [c])
-(ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ORNshiftLL x0 y [c])
-(ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ORNshiftRL x0 y [c])
-(ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ORNshiftRA x0 y [c])
-(EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (EONshiftLL x0 y [c])
-(EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (EONshiftRL x0 y [c])
-(EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (EONshiftRA x0 y [c])
-(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (CMPshiftLL x0 y [c])
-(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftLL x1 y [c]))
-(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (CMPshiftRL x0 y [c])
-(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftRL x1 y [c]))
-(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (CMPshiftRA x0 y [c])
-(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftRA x1 y [c]))
-(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (CMNshiftLL x0 y [c])
-(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (CMNshiftRL x0 y [c])
-(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (CMNshiftRA x0 y [c])
-(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (TSTshiftLL x0 y [c])
-(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (TSTshiftRL x0 y [c])
-(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (TSTshiftRA x0 y [c])
+(NEG x:(SLLconst [c] y)) && clobberIfDead(x) => (NEGshiftLL [c] y)
+(NEG x:(SRLconst [c] y)) && clobberIfDead(x) => (NEGshiftRL [c] y)
+(NEG x:(SRAconst [c] y)) && clobberIfDead(x) => (NEGshiftRA [c] y)
+(MVN x:(SLLconst [c] y)) && clobberIfDead(x) => (MVNshiftLL [c] y)
+(MVN x:(SRLconst [c] y)) && clobberIfDead(x) => (MVNshiftRL [c] y)
+(MVN x:(SRAconst [c] y)) && clobberIfDead(x) => (MVNshiftRA [c] y)
+(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ADDshiftLL x0 y [c])
+(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ADDshiftRL x0 y [c])
+(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ADDshiftRA x0 y [c])
+(SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (SUBshiftLL x0 y [c])
+(SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (SUBshiftRL x0 y [c])
+(SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (SUBshiftRA x0 y [c])
+(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ANDshiftLL x0 y [c])
+(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ANDshiftRL x0 y [c])
+(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ANDshiftRA x0 y [c])
+(OR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORshiftLL x0 y [c]) // useful for combined load
+(OR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORshiftRL x0 y [c])
+(OR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORshiftRA x0 y [c])
+(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (XORshiftLL x0 y [c])
+(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (XORshiftRL x0 y [c])
+(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (XORshiftRA x0 y [c])
+(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (BICshiftLL x0 y [c])
+(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (BICshiftRL x0 y [c])
+(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (BICshiftRA x0 y [c])
+(ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORNshiftLL x0 y [c])
+(ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORNshiftRL x0 y [c])
+(ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORNshiftRA x0 y [c])
+(EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (EONshiftLL x0 y [c])
+(EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (EONshiftRL x0 y [c])
+(EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (EONshiftRA x0 y [c])
+(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMPshiftLL x0 y [c])
+(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftLL x1 y [c]))
+(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMPshiftRL x0 y [c])
+(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRL x1 y [c]))
+(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMPshiftRA x0 y [c])
+(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRA x1 y [c]))
+(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMNshiftLL x0 y [c])
+(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMNshiftRL x0 y [c])
+(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMNshiftRA x0 y [c])
+(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (TSTshiftLL x0 y [c])
+(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (TSTshiftRL x0 y [c])
+(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (TSTshiftRA x0 y [c])
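// Example (illustrative; f, x, y are placeholder names): a single-use
// shift feeding an ALU op fuses into one shifted-operand instruction:
//	func f(x, y int64) int64 {
//		return x + y<<3 // one ADDshiftLL instead of LSL + ADD
//	}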
// prefer *const ops to *shift ops
-(ADDshiftLL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SLLconst <x.Type> x [d]))
-(ADDshiftRL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SRLconst <x.Type> x [d]))
-(ADDshiftRA (MOVDconst [c]) x [d]) -> (ADDconst [c] (SRAconst <x.Type> x [d]))
-(ANDshiftLL (MOVDconst [c]) x [d]) -> (ANDconst [c] (SLLconst <x.Type> x [d]))
-(ANDshiftRL (MOVDconst [c]) x [d]) -> (ANDconst [c] (SRLconst <x.Type> x [d]))
-(ANDshiftRA (MOVDconst [c]) x [d]) -> (ANDconst [c] (SRAconst <x.Type> x [d]))
-(ORshiftLL (MOVDconst [c]) x [d]) -> (ORconst [c] (SLLconst <x.Type> x [d]))
-(ORshiftRL (MOVDconst [c]) x [d]) -> (ORconst [c] (SRLconst <x.Type> x [d]))
-(ORshiftRA (MOVDconst [c]) x [d]) -> (ORconst [c] (SRAconst <x.Type> x [d]))
-(XORshiftLL (MOVDconst [c]) x [d]) -> (XORconst [c] (SLLconst <x.Type> x [d]))
-(XORshiftRL (MOVDconst [c]) x [d]) -> (XORconst [c] (SRLconst <x.Type> x [d]))
-(XORshiftRA (MOVDconst [c]) x [d]) -> (XORconst [c] (SRAconst <x.Type> x [d]))
-(CMPshiftLL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
-(CMPshiftRL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
-(CMPshiftRA (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
-(CMNshiftLL (MOVDconst [c]) x [d]) -> (CMNconst [c] (SLLconst <x.Type> x [d]))
-(CMNshiftRL (MOVDconst [c]) x [d]) -> (CMNconst [c] (SRLconst <x.Type> x [d]))
-(CMNshiftRA (MOVDconst [c]) x [d]) -> (CMNconst [c] (SRAconst <x.Type> x [d]))
-(TSTshiftLL (MOVDconst [c]) x [d]) -> (TSTconst [c] (SLLconst <x.Type> x [d]))
-(TSTshiftRL (MOVDconst [c]) x [d]) -> (TSTconst [c] (SRLconst <x.Type> x [d]))
-(TSTshiftRA (MOVDconst [c]) x [d]) -> (TSTconst [c] (SRAconst <x.Type> x [d]))
+(ADDshiftLL (MOVDconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVDconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVDconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVDconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVDconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVDconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftLL (MOVDconst [c]) x [d]) => (ORconst [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL (MOVDconst [c]) x [d]) => (ORconst [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA (MOVDconst [c]) x [d]) => (ORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftLL (MOVDconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVDconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVDconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
+(CMPshiftLL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+(CMNshiftLL (MOVDconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
+(CMNshiftRL (MOVDconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
+(CMNshiftRA (MOVDconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
+(TSTshiftLL (MOVDconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
+(TSTshiftRL (MOVDconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
+(TSTshiftRA (MOVDconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
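// Example (illustrative): when the unshifted operand is a constant,
// splitting it back out keeps the constant visible to later folding:
//	ADDshiftLL (MOVDconst [40]) x [3]  =>  ADDconst [40] (SLLconst [3] x)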
// constant folding in *shift ops
-(MVNshiftLL (MOVDconst [c]) [d]) -> (MOVDconst [^int64(uint64(c)<<uint64(d))])
-(MVNshiftRL (MOVDconst [c]) [d]) -> (MOVDconst [^int64(uint64(c)>>uint64(d))])
-(MVNshiftRA (MOVDconst [c]) [d]) -> (MOVDconst [^(c>>uint64(d))])
-(NEGshiftLL (MOVDconst [c]) [d]) -> (MOVDconst [-int64(uint64(c)<<uint64(d))])
-(NEGshiftRL (MOVDconst [c]) [d]) -> (MOVDconst [-int64(uint64(c)>>uint64(d))])
-(NEGshiftRA (MOVDconst [c]) [d]) -> (MOVDconst [-(c>>uint64(d))])
-(ADDshiftLL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)<<uint64(d))])
-(ADDshiftRL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)>>uint64(d))])
-(ADDshiftRA x (MOVDconst [c]) [d]) -> (ADDconst x [c>>uint64(d)])
-(SUBshiftLL x (MOVDconst [c]) [d]) -> (SUBconst x [int64(uint64(c)<<uint64(d))])
-(SUBshiftRL x (MOVDconst [c]) [d]) -> (SUBconst x [int64(uint64(c)>>uint64(d))])
-(SUBshiftRA x (MOVDconst [c]) [d]) -> (SUBconst x [c>>uint64(d)])
-(ANDshiftLL x (MOVDconst [c]) [d]) -> (ANDconst x [int64(uint64(c)<<uint64(d))])
-(ANDshiftRL x (MOVDconst [c]) [d]) -> (ANDconst x [int64(uint64(c)>>uint64(d))])
-(ANDshiftRA x (MOVDconst [c]) [d]) -> (ANDconst x [c>>uint64(d)])
-(ORshiftLL x (MOVDconst [c]) [d]) -> (ORconst x [int64(uint64(c)<<uint64(d))])
-(ORshiftRL x (MOVDconst [c]) [d]) -> (ORconst x [int64(uint64(c)>>uint64(d))])
-(ORshiftRA x (MOVDconst [c]) [d]) -> (ORconst x [c>>uint64(d)])
-(XORshiftLL x (MOVDconst [c]) [d]) -> (XORconst x [int64(uint64(c)<<uint64(d))])
-(XORshiftRL x (MOVDconst [c]) [d]) -> (XORconst x [int64(uint64(c)>>uint64(d))])
-(XORshiftRA x (MOVDconst [c]) [d]) -> (XORconst x [c>>uint64(d)])
-(BICshiftLL x (MOVDconst [c]) [d]) -> (ANDconst x [^int64(uint64(c)<<uint64(d))])
-(BICshiftRL x (MOVDconst [c]) [d]) -> (ANDconst x [^int64(uint64(c)>>uint64(d))])
-(BICshiftRA x (MOVDconst [c]) [d]) -> (ANDconst x [^(c>>uint64(d))])
-(ORNshiftLL x (MOVDconst [c]) [d]) -> (ORconst x [^int64(uint64(c)<<uint64(d))])
-(ORNshiftRL x (MOVDconst [c]) [d]) -> (ORconst x [^int64(uint64(c)>>uint64(d))])
-(ORNshiftRA x (MOVDconst [c]) [d]) -> (ORconst x [^(c>>uint64(d))])
-(EONshiftLL x (MOVDconst [c]) [d]) -> (XORconst x [^int64(uint64(c)<<uint64(d))])
-(EONshiftRL x (MOVDconst [c]) [d]) -> (XORconst x [^int64(uint64(c)>>uint64(d))])
-(EONshiftRA x (MOVDconst [c]) [d]) -> (XORconst x [^(c>>uint64(d))])
-(CMPshiftLL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)<<uint64(d))])
-(CMPshiftRL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)>>uint64(d))])
-(CMPshiftRA x (MOVDconst [c]) [d]) -> (CMPconst x [c>>uint64(d)])
-(CMNshiftLL x (MOVDconst [c]) [d]) -> (CMNconst x [int64(uint64(c)<<uint64(d))])
-(CMNshiftRL x (MOVDconst [c]) [d]) -> (CMNconst x [int64(uint64(c)>>uint64(d))])
-(CMNshiftRA x (MOVDconst [c]) [d]) -> (CMNconst x [c>>uint64(d)])
-(TSTshiftLL x (MOVDconst [c]) [d]) -> (TSTconst x [int64(uint64(c)<<uint64(d))])
-(TSTshiftRL x (MOVDconst [c]) [d]) -> (TSTconst x [int64(uint64(c)>>uint64(d))])
-(TSTshiftRA x (MOVDconst [c]) [d]) -> (TSTconst x [c>>uint64(d)])
+(MVNshiftLL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)<<uint64(d))])
+(MVNshiftRL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)>>uint64(d))])
+(MVNshiftRA (MOVDconst [c]) [d]) => (MOVDconst [^(c>>uint64(d))])
+(NEGshiftLL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)<<uint64(d))])
+(NEGshiftRL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)>>uint64(d))])
+(NEGshiftRA (MOVDconst [c]) [d]) => (MOVDconst [-(c>>uint64(d))])
+(ADDshiftLL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)<<uint64(d))])
+(ADDshiftRL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)>>uint64(d))])
+(ADDshiftRA x (MOVDconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
+(SUBshiftLL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)<<uint64(d))])
+(SUBshiftRL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)>>uint64(d))])
+(SUBshiftRA x (MOVDconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
+(ANDshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)<<uint64(d))])
+(ANDshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)>>uint64(d))])
+(ANDshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
+(ORshiftLL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)<<uint64(d))])
+(ORshiftRL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)>>uint64(d))])
+(ORshiftRA x (MOVDconst [c]) [d]) => (ORconst x [c>>uint64(d)])
+(XORshiftLL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)<<uint64(d))])
+(XORshiftRL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)>>uint64(d))])
+(XORshiftRA x (MOVDconst [c]) [d]) => (XORconst x [c>>uint64(d)])
+(BICshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)<<uint64(d))])
+(BICshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)>>uint64(d))])
+(BICshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [^(c>>uint64(d))])
+(ORNshiftLL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)<<uint64(d))])
+(ORNshiftRL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)>>uint64(d))])
+(ORNshiftRA x (MOVDconst [c]) [d]) => (ORconst x [^(c>>uint64(d))])
+(EONshiftLL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)<<uint64(d))])
+(EONshiftRL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)>>uint64(d))])
+(EONshiftRA x (MOVDconst [c]) [d]) => (XORconst x [^(c>>uint64(d))])
+(CMPshiftLL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)<<uint64(d))])
+(CMPshiftRL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)>>uint64(d))])
+(CMPshiftRA x (MOVDconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
+(CMNshiftLL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)<<uint64(d))])
+(CMNshiftRL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)>>uint64(d))])
+(CMNshiftRA x (MOVDconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
+(TSTshiftLL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)<<uint64(d))])
+(TSTshiftRL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)>>uint64(d))])
+(TSTshiftRA x (MOVDconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
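// Example (illustrative): a shift of a constant folds away completely:
//	ADDshiftLL x (MOVDconst [3]) [4]  =>  ADDconst x [48]   // x + 3<<4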
// simplification with *shift ops
-(SUBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(SUBshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(SUBshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(ANDshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
-(ANDshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
-(ANDshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
-(ORshiftLL x y:(SLLconst x [c]) [d]) && c==d -> y
-(ORshiftRL x y:(SRLconst x [c]) [d]) && c==d -> y
-(ORshiftRA x y:(SRAconst x [c]) [d]) && c==d -> y
-(XORshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(XORshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(XORshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(BICshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(BICshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(BICshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [0])
-(EONshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [-1])
-(EONshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [-1])
-(EONshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [-1])
-(ORNshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [-1])
-(ORNshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [-1])
-(ORNshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [-1])
+(SUBshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [0])
+(SUBshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [0])
+(SUBshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [0])
+(ANDshiftLL x y:(SLLconst x [c]) [d]) && c==d => y
+(ANDshiftRL x y:(SRLconst x [c]) [d]) && c==d => y
+(ANDshiftRA x y:(SRAconst x [c]) [d]) && c==d => y
+(ORshiftLL x y:(SLLconst x [c]) [d]) && c==d => y
+(ORshiftRL x y:(SRLconst x [c]) [d]) && c==d => y
+(ORshiftRA x y:(SRAconst x [c]) [d]) && c==d => y
+(XORshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [0])
+(XORshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [0])
+(XORshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [0])
+(BICshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [0])
+(BICshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [0])
+(BICshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [0])
+(EONshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [-1])
+(EONshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [-1])
+(EONshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [-1])
+(ORNshiftLL x (SLLconst x [c]) [d]) && c==d => (MOVDconst [-1])
+(ORNshiftRL x (SRLconst x [c]) [d]) && c==d => (MOVDconst [-1])
+(ORNshiftRA x (SRAconst x [c]) [d]) && c==d => (MOVDconst [-1])
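// In words: SUB/XOR/BIC of a value with an identically shifted copy of
// itself folds to 0, EON/ORN fold to -1, and AND/OR fold to the shifted copy.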
// Generate rotates with const shift
-(ADDshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
-( ORshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
-(XORshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
-(ADDshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [ c] x)
-( ORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [ c] x)
-(XORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [ c] x)
+(ADDshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+( ORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+(XORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+(ADDshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
+( ORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
+(XORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
(ADDshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
- -> (RORWconst [32-c] x)
+ => (RORWconst [32-c] x)
( ORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
- -> (RORWconst [32-c] x)
+ => (RORWconst [32-c] x)
(XORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
- -> (RORWconst [32-c] x)
-(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x)
-( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x)
-(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x)
+ => (RORWconst [32-c] x)
+(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
-(RORconst [c] (RORconst [d] x)) -> (RORconst [(c+d)&63] x)
-(RORWconst [c] (RORWconst [d] x)) -> (RORWconst [(c+d)&31] x)
+(RORconst [c] (RORconst [d] x)) => (RORconst [(c+d)&63] x)
+(RORWconst [c] (RORWconst [d] x)) => (RORWconst [(c+d)&31] x)
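// Example (illustrative): a constant-amount rotate written as two shifts:
//	func ror7(x uint64) uint64 {
//		return x>>7 | x<<57 // single ROR #7
//	}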
// Generate rotates with non-const shift.
// These rules match the Go source code like
((ADD|OR|XOR) (SLL x (ANDconst <t> [63] y))
(CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
(CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
- -> (ROR x (NEG <t> y))
+ => (ROR x (NEG <t> y))
((ADD|OR|XOR) (SRL <typ.UInt64> x (ANDconst <t> [63] y))
(CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
(CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
- -> (ROR x y)
+ => (ROR x y)
// These rules match the Go source code like
// y &= 31
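// (x << y) | (x >> (32-y)), where x is a uint32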
((ADD|OR|XOR) (SLL x (ANDconst <t> [31] y))
(CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
(CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
- -> (RORW x (NEG <t> y))
+ => (RORW x (NEG <t> y))
((ADD|OR|XOR) (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y))
(CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
(CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
- -> (RORW x y)
+ => (RORW x y)
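// Example (an illustrative sketch) of source the rule groups above can
// match, assuming the shift amount is masked as shown:
//	func rotl64(x uint64, y uint) uint64 {
//		y &= 63
//		return x<<y | x>>(64-y) // lowered to ROR x, (NEG y)
//	}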
-// ((x>>8) | (x<<8)) -> (REV16W x), the type of x is uint16, "|" can also be "^" or "+".
-((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) -> (REV16W x)
+// ((x>>8) | (x<<8)) => (REV16W x), the type of x is uint16, "|" can also be "^" or "+".
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) => (REV16W x)
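// Example (illustrative):
//	func bswap16(x uint16) uint16 {
//		return x>>8 | x<<8 // single REV16W
//	}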
// Extract from reg pair
-(ADDshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
-( ORshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
-(XORshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
+(ADDshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+( ORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+(XORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
(ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
- -> (EXTRWconst [32-c] x2 x)
+ => (EXTRWconst [32-c] x2 x)
( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
- -> (EXTRWconst [32-c] x2 x)
+ => (EXTRWconst [32-c] x2 x)
(XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
- -> (EXTRWconst [32-c] x2 x)
+ => (EXTRWconst [32-c] x2 x)
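// Example (illustrative; hi, lo are placeholder names): bits taken
// across a register pair:
//	func fromPair(hi, lo uint64) uint64 {
//		return hi<<16 | lo>>48 // EXTR #48 on the pair hi:lo
//	}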
// Rewrite special pairs of shifts to AND.
// On ARM64 the bitmask can fit into an instruction.
-(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
-(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
+(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 => (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
+(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 => (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
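// Example (illustrative):
//	(x << 8) >> 8  ==  x & (1<<56 - 1)  // high bits cleared
//	(x >> 8) << 8  ==  x &^ 0xff        // low bits cleared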
// Special case for setting bits to 1. An example is math.Copysign(c, -1)
-(ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0 -> (ORconst [c1] x)
+(ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0 => (ORconst [c1] x)
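// Every bit the AND clears (where c2 is 0) is forced back to 1 by the
// OR, since c2|c1 == ^0 means c1 is 1 there; the AND is thus redundant.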
// bitfield ops
// sbfiz
// (x << lc) >> rc
-(SRAconst [rc] (SLLconst [lc] x)) && lc > rc -> (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
-(MOVWreg (SLLconst [lc] x)) && lc < 32 -> (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
-(MOVHreg (SLLconst [lc] x)) && lc < 16 -> (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
-(MOVBreg (SLLconst [lc] x)) && lc < 8 -> (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
+(SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+(MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
+(MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
+(MOVBreg (SLLconst [lc] x)) && lc < 8 => (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
// sbfx
// (x << lc) >> rc
-(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc -> (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
-(SRAconst [rc] (MOVWreg x)) && rc < 32 -> (SBFX [armBFAuxInt(rc, 32-rc)] x)
-(SRAconst [rc] (MOVHreg x)) && rc < 16 -> (SBFX [armBFAuxInt(rc, 16-rc)] x)
-(SRAconst [rc] (MOVBreg x)) && rc < 8 -> (SBFX [armBFAuxInt(rc, 8-rc)] x)
+(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+(SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
+(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
+(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
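// Example (illustrative):
//	(x << 16) >> 8          // int64 shifts: SBFIZ lsb=8, width=48
//	int64(int32(x)) >> 16   // SBFX lsb=16, width=16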
// sbfiz/sbfx combinations: merge shifts into bitfield ops
-(SRAconst [sc] (SBFIZ [bfc] x)) && sc < getARM64BFlsb(bfc)
- -> (SBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
-(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= getARM64BFlsb(bfc)
- && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- -> (SBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
+ => (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
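// Example (illustrative): a further right shift just moves the field:
// SRAconst [4] of SBFIZ(8, 48) becomes SBFIZ(4, 48).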
// ubfiz
// (x & ac) << sc
(SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
- -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
-(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) -> (UBFIZ [armBFAuxInt(sc, 32)] x)
-(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) -> (UBFIZ [armBFAuxInt(sc, 16)] x)
-(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) -> (UBFIZ [armBFAuxInt(sc, 8)] x)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+(SLLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFIZ [armBFAuxInt(sc, 32)] x)
+(SLLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFIZ [armBFAuxInt(sc, 16)] x)
+(SLLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFIZ [armBFAuxInt(sc, 8)] x)
// (x << sc) & ac
(ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
- -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
(MOVWUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, sc)
- -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
(MOVHUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, sc)
- -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
(MOVBUreg (SLLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, sc)
- -> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
// (x << lc) >> rc
-(SRLconst [rc] (SLLconst [lc] x)) && lc > rc -> (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
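// Example (illustrative):
//	(x & 0xfff) << 4  // UBFIZ lsb=4, width=12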
// ubfx
// (x >> sc) & ac
(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
- -> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
-(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) -> (UBFX [armBFAuxInt(sc, 32)] x)
-(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) -> (UBFX [armBFAuxInt(sc, 16)] x)
-(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) -> (UBFX [armBFAuxInt(sc, 8)] x)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+(MOVWUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<32-1, 0) => (UBFX [armBFAuxInt(sc, 32)] x)
+(MOVHUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<16-1, 0) => (UBFX [armBFAuxInt(sc, 16)] x)
+(MOVBUreg (SRLconst [sc] x)) && isARM64BFMask(sc, 1<<8-1, 0) => (UBFX [armBFAuxInt(sc, 8)] x)
// (x & ac) >> sc
(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
- -> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
(SRLconst [sc] (MOVWUreg x)) && isARM64BFMask(sc, 1<<32-1, sc)
- -> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
(SRLconst [sc] (MOVHUreg x)) && isARM64BFMask(sc, 1<<16-1, sc)
- -> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
(SRLconst [sc] (MOVBUreg x)) && isARM64BFMask(sc, 1<<8-1, sc)
- -> (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
// (x << lc) >> rc
-(SRLconst [rc] (SLLconst [lc] x)) && lc < rc -> (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
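// Example (illustrative):
//	(x >> 16) & 0xff  // UBFX lsb=16, width=8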
// ubfiz/ubfx combinations: merge shifts into bitfield ops
-(SRLconst [sc] (UBFX [bfc] x)) && sc < getARM64BFwidth(bfc)
- -> (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
-(UBFX [bfc] (SRLconst [sc] x)) && sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
- -> (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
-(SLLconst [sc] (UBFIZ [bfc] x)) && sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
- -> (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
-(UBFIZ [bfc] (SLLconst [sc] x)) && sc < getARM64BFwidth(bfc)
- -> (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.getARM64BFwidth()
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFwidth()
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
// ((x << c1) >> c2) >> c3
-(SRLconst [sc] (UBFIZ [bfc] x)) && sc == getARM64BFlsb(bfc)
- -> (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x)
-(SRLconst [sc] (UBFIZ [bfc] x)) && sc < getARM64BFlsb(bfc)
- -> (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
-(SRLconst [sc] (UBFIZ [bfc] x)) && sc > getARM64BFlsb(bfc)
- && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- -> (UBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.getARM64BFlsb()
+ => (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
// ((x << c1) << c2) >> c3
-(UBFX [bfc] (SLLconst [sc] x)) && sc == getARM64BFlsb(bfc)
- -> (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x)
-(UBFX [bfc] (SLLconst [sc] x)) && sc < getARM64BFlsb(bfc)
- -> (UBFX [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
-(UBFX [bfc] (SLLconst [sc] x)) && sc > getARM64BFlsb(bfc)
- && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- -> (UBFIZ [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.getARM64BFlsb()
+ => (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFlsb()
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
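// Example (illustrative): ((x << 8) >> 4) >> 4 first becomes
// SRLconst [4] of UBFIZ(4, 56), then plain x & (1<<56 - 1).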
// bfi
(OR (UBFIZ [bfc] x) (ANDconst [ac] y))
- && ac == ^((1<<uint(getARM64BFwidth(bfc))-1) << uint(getARM64BFlsb(bfc)))
- -> (BFI [bfc] y x)
+ && ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+ => (BFI [bfc] y x)
(ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
&& lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
- -> (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
+ => (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
// bfxil
-(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(getARM64BFwidth(bfc))-1)
- -> (BFXIL [bfc] y x)
-(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == getARM64BFwidth(bfc)
- -> (BFXIL [bfc] y x)
+(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+ => (BFXIL [bfc] y x)
+(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.getARM64BFwidth()
+ => (BFXIL [bfc] y x)
(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
- -> (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
+ => (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
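// Example (illustrative; insert, x, y are placeholder names):
//	func insert(y, x uint64) uint64 {
//		return y&^0xff00 | x<<8&0xff00 // BFI: 8 bits of x into y at bit 8
//	}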
// do combined loads
// little endian loads
-// b[0] | b[1]<<8 -> load 16-bit
+// b[0] | b[1]<<8 => load 16-bit
(ORshiftLL <t> [8]
y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))
y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
&& y0.Uses == 1 && y1.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, y0, y1)
- -> @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ => @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
(ORshiftLL <t> [8]
y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))
y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
&& mergePoint(b,x0,x1) != nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x0, x1, y0, y1)
- -> @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr0 idx0 mem)
+ => @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr0 idx0 mem)
(ORshiftLL <t> [8]
y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))
y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
&& y0.Uses == 1 && y1.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, y0, y1)
- -> @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr idx mem)
+ => @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr idx mem)
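// Example (an illustrative sketch) of source these rules can merge,
// assuming bounds checks have been eliminated:
//	func le16(b []byte) uint16 {
//		return uint16(b[0]) | uint16(b[1])<<8 // one 16-bit load
//	}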
-// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 => load 32-bit
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
x0:(MOVHUload [i0] {s} p mem)
y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
&& o0.Uses == 1
&& mergePoint(b,x0,x1,x2) != nil
&& clobber(x0, x1, x2, y1, y2, o0)
- -> @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ => @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
x0:(MOVHUloadidx ptr0 idx0 mem)
y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADD ptr1 idx1) mem)))
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2, y1, y2, o0)
- -> @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 idx0 mem)
+ => @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 idx0 mem)
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
x0:(MOVHUloadidx ptr idx mem)
y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
&& o0.Uses == 1
&& mergePoint(b,x0,x1,x2) != nil
&& clobber(x0, x1, x2, y1, y2, o0)
- -> @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr idx mem)
+ => @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr idx mem)
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
x0:(MOVHUloadidx2 ptr0 idx0 mem)
y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADDshiftLL [1] ptr1 idx1) mem)))
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2, y1, y2, o0)
- -> @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 (SLLconst <idx0.Type> [1] idx0) mem)
+ => @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 (SLLconst <idx0.Type> [1] idx0) mem)
-// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 => load 64-bit
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
x0:(MOVWUload [i0] {s} p mem)
y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem)))
&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
&& mergePoint(b,x0,x1,x2,x3,x4) != nil
&& clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
- -> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
x0:(MOVWUloadidx ptr0 idx0 mem)
y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADD ptr1 idx1) mem)))
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
- -> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 idx0 mem)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 idx0 mem)
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
x0:(MOVWUloadidx4 ptr0 idx0 mem)
y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADDshiftLL [2] ptr1 idx1) mem)))
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
- -> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 (SLLconst <idx0.Type> [2] idx0) mem)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 (SLLconst <idx0.Type> [2] idx0) mem)
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
x0:(MOVWUloadidx ptr idx mem)
y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [4] idx) mem)))
&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
&& mergePoint(b,x0,x1,x2,x3,x4) != nil
&& clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
- -> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr idx mem)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr idx mem)
-// b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] -> load 32-bit
+// b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] => load 32-bit
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem)))
y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
&& mergePoint(b,x0,x1,x2,x3) != nil
&& clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
- -> @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ => @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
y0:(MOVDnop x0:(MOVBUload [3] {s} p mem)))
y1:(MOVDnop x1:(MOVBUload [2] {s} p mem)))
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
- -> @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr0 idx0 mem)
+ => @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr0 idx0 mem)
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
&& mergePoint(b,x0,x1,x2,x3) != nil
&& clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
- -> @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr idx mem)
+ => @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr idx mem)
-// b[7]<<56 | b[6]<<48 | b[5]<<40 | b[4]<<32 | b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] -> load 64-bit
+// b[7]<<56 | b[6]<<48 | b[5]<<40 | b[4]<<32 | b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] => load 64-bit
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem)))
y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem)))
&& o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
&& clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
- -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
y0:(MOVDnop x0:(MOVBUload [7] {s} p mem)))
y1:(MOVDnop x1:(MOVBUload [6] {s} p mem)))
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
- -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr0 idx0 mem)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr0 idx0 mem)
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))
&& o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
&& clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
- -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr idx mem)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr idx mem)
// big endian loads
-// b[1] | b[0]<<8 -> load 16-bit, reverse
+// b[1] | b[0]<<8 => load 16-bit, reverse
(ORshiftLL <t> [8]
y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem))
y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
&& y0.Uses == 1 && y1.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, y0, y1)
- -> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
+ => @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
(ORshiftLL <t> [8]
y0:(MOVDnop x0:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))
y1:(MOVDnop x1:(MOVBUloadidx ptr0 idx0 mem)))
&& mergePoint(b,x0,x1) != nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x0, x1, y0, y1)
- -> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr0 idx0 mem))
+ => @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr0 idx0 mem))
(ORshiftLL <t> [8]
y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [1] idx) mem))
y1:(MOVDnop x1:(MOVBUloadidx ptr idx mem)))
&& y0.Uses == 1 && y1.Uses == 1
&& mergePoint(b,x0,x1) != nil
&& clobber(x0, x1, y0, y1)
- -> @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr idx mem))
+ => @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr idx mem))
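// Example (illustrative):
//	func be16(b []byte) uint16 {
//		return uint16(b[1]) | uint16(b[0])<<8 // 16-bit load, then REV16W
//	}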
-// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit, reverse
+// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 => load 32-bit, reverse
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
y0:(REV16W x0:(MOVHUload [i2] {s} p mem))
y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
&& o0.Uses == 1
&& mergePoint(b,x0,x1,x2) != nil
&& clobber(x0, x1, x2, y0, y1, y2, o0)
- -> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+ => @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
y0:(REV16W x0:(MOVHUload [2] {s} p mem))
y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2, y0, y1, y2, o0)
- -> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+ => @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
(ORshiftLL <t> [24] o0:(ORshiftLL [16]
y0:(REV16W x0:(MOVHUloadidx ptr (ADDconst [2] idx) mem))
y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
&& o0.Uses == 1
&& mergePoint(b,x0,x1,x2) != nil
&& clobber(x0, x1, x2, y0, y1, y2, o0)
- -> @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+ => @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
-// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit, reverse
+// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 64-bit, reverse
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
y0:(REVW x0:(MOVWUload [i4] {s} p mem))
y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem)))
&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
&& mergePoint(b,x0,x1,x2,x3,x4) != nil
&& clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
- -> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+ => @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
y0:(REVW x0:(MOVWUload [4] {s} p mem))
y1:(MOVDnop x1:(MOVBUload [3] {s} p mem)))
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
- -> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+ => @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
y0:(REVW x0:(MOVWUloadidx ptr (ADDconst [4] idx) mem))
y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
&& mergePoint(b,x0,x1,x2,x3,x4) != nil
&& clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
- -> @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr idx mem))
+ => @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr idx mem))
-// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] -> load 32-bit, reverse
+// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] => load 32-bit, reverse
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
&& mergePoint(b,x0,x1,x2,x3) != nil
&& clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
- -> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+ => @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)))
y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
- -> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+ => @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)))
y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
&& mergePoint(b,x0,x1,x2,x3) != nil
&& clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
- -> @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+ => @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
-// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] -> load 64-bit, reverse
+// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] => load 64-bit, reverse
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
&& o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
&& clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
- -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)))
y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
- -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)))
y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
&& o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
&& clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
- -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr idx mem))
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr idx mem))
// Combine zero stores into larger (unaligned) stores.
(MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem))
&& x.Uses == 1
- && areAdjacentOffsets(i,j,1)
- && is32Bit(min(i,j))
+ && areAdjacentOffsets(int64(i),int64(j),1)
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVHstorezero [min(i,j)] {s} ptr0 mem)
+ => (MOVHstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
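// Example (illustrative): b[0] = 0 followed by b[1] = 0 merges into a
// single MOVHstorezero at the lower of the two offsets.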
(MOVBstorezero [1] {s} (ADD ptr0 idx0) x:(MOVBstorezeroidx ptr1 idx1 mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVHstorezeroidx ptr1 idx1 mem)
+ => (MOVHstorezeroidx ptr1 idx1 mem)
(MOVBstorezeroidx ptr (ADDconst [1] idx) x:(MOVBstorezeroidx ptr idx mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVHstorezeroidx ptr idx mem)
+ => (MOVHstorezeroidx ptr idx mem)
(MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem))
&& x.Uses == 1
- && areAdjacentOffsets(i,j,2)
- && is32Bit(min(i,j))
+ && areAdjacentOffsets(int64(i),int64(j),2)
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVWstorezero [min(i,j)] {s} ptr0 mem)
+ => (MOVWstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
(MOVHstorezero [2] {s} (ADD ptr0 idx0) x:(MOVHstorezeroidx ptr1 idx1 mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVWstorezeroidx ptr1 idx1 mem)
+ => (MOVWstorezeroidx ptr1 idx1 mem)
(MOVHstorezeroidx ptr (ADDconst [2] idx) x:(MOVHstorezeroidx ptr idx mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVWstorezeroidx ptr idx mem)
+ => (MOVWstorezeroidx ptr idx mem)
(MOVHstorezero [2] {s} (ADDshiftLL [1] ptr0 idx0) x:(MOVHstorezeroidx2 ptr1 idx1 mem))
&& x.Uses == 1
&& s == nil
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& clobber(x)
- -> (MOVWstorezeroidx ptr1 (SLLconst <idx1.Type> [1] idx1) mem)
+ => (MOVWstorezeroidx ptr1 (SLLconst <idx1.Type> [1] idx1) mem)
(MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem))
&& x.Uses == 1
- && areAdjacentOffsets(i,j,4)
- && is32Bit(min(i,j))
+ && areAdjacentOffsets(int64(i),int64(j),4)
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVDstorezero [min(i,j)] {s} ptr0 mem)
+ => (MOVDstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
(MOVWstorezero [4] {s} (ADD ptr0 idx0) x:(MOVWstorezeroidx ptr1 idx1 mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVDstorezeroidx ptr1 idx1 mem)
+ => (MOVDstorezeroidx ptr1 idx1 mem)
(MOVWstorezeroidx ptr (ADDconst [4] idx) x:(MOVWstorezeroidx ptr idx mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVDstorezeroidx ptr idx mem)
+ => (MOVDstorezeroidx ptr idx mem)
(MOVWstorezero [4] {s} (ADDshiftLL [2] ptr0 idx0) x:(MOVWstorezeroidx4 ptr1 idx1 mem))
&& x.Uses == 1
&& s == nil
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& clobber(x)
- -> (MOVDstorezeroidx ptr1 (SLLconst <idx1.Type> [2] idx1) mem)
+ => (MOVDstorezeroidx ptr1 (SLLconst <idx1.Type> [2] idx1) mem)
(MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem))
&& x.Uses == 1
- && areAdjacentOffsets(i,j,8)
- && is32Bit(min(i,j))
+ && areAdjacentOffsets(int64(i),int64(j),8)
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVQstorezero [min(i,j)] {s} ptr0 mem)
+ => (MOVQstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
(MOVDstorezero [8] {s} p0:(ADD ptr0 idx0) x:(MOVDstorezeroidx ptr1 idx1 mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVQstorezero [0] {s} p0 mem)
+ => (MOVQstorezero [0] {s} p0 mem)
(MOVDstorezero [8] {s} p0:(ADDshiftLL [3] ptr0 idx0) x:(MOVDstorezeroidx8 ptr1 idx1 mem))
&& x.Uses == 1
&& s == nil
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& clobber(x)
- -> (MOVQstorezero [0] {s} p0 mem)
+ => (MOVQstorezero [0] {s} p0 mem)
// Combine stores into larger (unaligned) stores.
(MOVBstore [i] {s} ptr0 (SRLconst [8] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVHstore [i-1] {s} ptr0 w mem)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
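// Example (illustrative): a little-endian 16-bit store written bytewise:
//	b[0] = byte(w)
//	b[1] = byte(w >> 8) // the pair merges into one MOVHstore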
(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] w) x:(MOVBstoreidx ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVHstoreidx ptr1 idx1 w mem)
+ => (MOVHstoreidx ptr1 idx1 w mem)
(MOVBstoreidx ptr (ADDconst [1] idx) (SRLconst [8] w) x:(MOVBstoreidx ptr idx w mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVHstoreidx ptr idx w mem)
+ => (MOVHstoreidx ptr idx w mem)
(MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVHstore [i-1] {s} ptr0 w mem)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVHstoreidx ptr1 idx1 w mem)
+ => (MOVHstoreidx ptr1 idx1 w mem)
(MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVHstore [i-1] {s} ptr0 w mem)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVHstoreidx ptr1 idx1 w mem)
+ => (MOVHstoreidx ptr1 idx1 w mem)
(MOVBstore [i] {s} ptr0 (SRLconst [8] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVHstore [i-1] {s} ptr0 w mem)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVHstoreidx ptr1 idx1 w mem)
+ => (MOVHstoreidx ptr1 idx1 w mem)
(MOVBstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] w) mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVHstore [i-1] {s} ptr0 w0 mem)
+ => (MOVHstore [i-1] {s} ptr0 w0 mem)
(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] w) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVHstoreidx ptr1 idx1 w0 mem)
+ => (MOVHstoreidx ptr1 idx1 w0 mem)
(MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
- && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc)
- && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2)
- && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8
+ && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb()
+ && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb()
+ && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8
&& clobber(x)
- -> (MOVHstore [i-1] {s} ptr0 w0 mem)
+ => (MOVHstore [i-1] {s} ptr0 w0 mem)
(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
- && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc)
- && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2)
- && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8
+ && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb()
+ && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb()
+ && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8
&& clobber(x)
- -> (MOVHstoreidx ptr1 idx1 w0 mem)
+ => (MOVHstoreidx ptr1 idx1 w0 mem)
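// Aside (sketch modeled on cmd/compile/internal/ssa/rewrite.go; the
// definitions below are assumptions, not text from this CL): UBFX-style
// ops pack an lsb and a width into one aux value, and the conditions
// above now read the parts back through methods on the typed aux.

package sketch

type arm64BitField int16

// armBFAuxInt packs (lsb, width) as lsb<<8 | width; lsb is at most 63
// and width at most 64, so the value fits comfortably in 16 bits.
func armBFAuxInt(lsb, width int64) arm64BitField {
	return arm64BitField(lsb<<8 | width)
}

// getARM64BFlsb recovers the lsb half of the packed value.
func (bfc arm64BitField) getARM64BFlsb() int64 {
	return int64(uint64(bfc) >> 8)
}

// getARM64BFwidth recovers the width half of the packed value.
func (bfc arm64BitField) getARM64BFwidth() int64 {
	return int64(bfc) & 0xff
}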
(MOVBstore [i] {s} ptr0 (SRLconst [j] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVHstore [i-1] {s} ptr0 w0 mem)
+ => (MOVHstore [i-1] {s} ptr0 w0 mem)
(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVHstoreidx ptr1 idx1 w0 mem)
+ => (MOVHstoreidx ptr1 idx1 w0 mem)
(MOVHstore [i] {s} ptr0 (SRLconst [16] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVWstore [i-2] {s} ptr0 w mem)
+ => (MOVWstore [i-2] {s} ptr0 w mem)
(MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVWstoreidx ptr1 idx1 w mem)
+ => (MOVWstoreidx ptr1 idx1 w mem)
(MOVHstoreidx ptr (ADDconst [2] idx) (SRLconst [16] w) x:(MOVHstoreidx ptr idx w mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVWstoreidx ptr idx w mem)
+ => (MOVWstoreidx ptr idx w mem)
(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& clobber(x)
- -> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
(MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVWstore [i-2] {s} ptr0 w mem)
+ => (MOVWstore [i-2] {s} ptr0 w mem)
(MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVWstoreidx ptr1 idx1 w mem)
+ => (MOVWstoreidx ptr1 idx1 w mem)
(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& clobber(x)
- -> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
(MOVHstore [i] {s} ptr0 (SRLconst [16] (MOVDreg w)) x:(MOVHstore [i-2] {s} ptr1 w mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVWstore [i-2] {s} ptr0 w mem)
+ => (MOVWstore [i-2] {s} ptr0 w mem)
(MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVWstoreidx ptr1 idx1 w mem)
+ => (MOVWstoreidx ptr1 idx1 w mem)
(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx2 ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& clobber(x)
- -> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
(MOVHstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVHstore [i-2] {s} ptr1 w0:(SRLconst [j-16] w) mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVWstore [i-2] {s} ptr0 w0 mem)
+ => (MOVWstore [i-2] {s} ptr0 w0 mem)
(MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx ptr1 idx1 w0:(SRLconst [j-16] w) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVWstoreidx ptr1 idx1 w0 mem)
+ => (MOVWstoreidx ptr1 idx1 w0 mem)
(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx2 ptr1 idx1 w0:(SRLconst [j-16] w) mem))
&& x.Uses == 1
&& s == nil
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& clobber(x)
- -> (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w0 mem)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w0 mem)
(MOVWstore [i] {s} ptr0 (SRLconst [32] w) x:(MOVWstore [i-4] {s} ptr1 w mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVDstore [i-4] {s} ptr0 w mem)
+ => (MOVDstore [i-4] {s} ptr0 w mem)
(MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVDstoreidx ptr1 idx1 w mem)
+ => (MOVDstoreidx ptr1 idx1 w mem)
(MOVWstoreidx ptr (ADDconst [4] idx) (SRLconst [32] w) x:(MOVWstoreidx ptr idx w mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVDstoreidx ptr idx w mem)
+ => (MOVDstoreidx ptr idx w mem)
(MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx4 ptr1 idx1 w mem))
&& x.Uses == 1
&& s == nil
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& clobber(x)
- -> (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
+ => (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
(MOVWstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVWstore [i-4] {s} ptr1 w0:(SRLconst [j-32] w) mem))
&& x.Uses == 1
&& isSamePtr(ptr0, ptr1)
&& clobber(x)
- -> (MOVDstore [i-4] {s} ptr0 w0 mem)
+ => (MOVDstore [i-4] {s} ptr0 w0 mem)
(MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx ptr1 idx1 w0:(SRLconst [j-32] w) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVDstoreidx ptr1 idx1 w0 mem)
+ => (MOVDstoreidx ptr1 idx1 w0 mem)
(MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx4 ptr1 idx1 w0:(SRLconst [j-32] w) mem))
&& x.Uses == 1
&& s == nil
&& isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
&& clobber(x)
- -> (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w0 mem)
+ => (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w0 mem)
(MOVBstore [i] {s} ptr w
x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w)
x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w)
&& x5.Uses == 1
&& x6.Uses == 1
&& clobber(x0, x1, x2, x3, x4, x5, x6)
- -> (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
+ => (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
(MOVBstore [7] {s} p w
x0:(MOVBstore [6] {s} p (SRLconst [8] w)
x1:(MOVBstore [5] {s} p (SRLconst [16] w)
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2, x3, x4, x5, x6)
- -> (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
+ => (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
(MOVBstore [i] {s} ptr w
x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w)
x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w)
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x0, x1, x2)
- -> (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
(MOVBstore [3] {s} p w
x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w)
x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w)
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2)
- -> (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
(MOVBstoreidx ptr (ADDconst [3] idx) w
x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w)
x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w)
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x0, x1, x2)
- -> (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
+ => (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
(MOVBstoreidx ptr idx w
x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w)
x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w)
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x0, x1, x2)
- -> (MOVWstoreidx ptr idx w mem)
+ => (MOVWstoreidx ptr idx w mem)
(MOVBstore [i] {s} ptr w
x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w))
x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] (MOVDreg w))
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x0, x1, x2)
- -> (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
(MOVBstore [3] {s} p w
x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w))
x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w))
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2)
- -> (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
(MOVBstore [i] {s} ptr w
x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w)
x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w)
&& x1.Uses == 1
&& x2.Uses == 1
&& clobber(x0, x1, x2)
- -> (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
(MOVBstore [3] {s} p w
x0:(MOVBstore [2] {s} p (SRLconst [8] w)
x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w)
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& isSamePtr(p1, p)
&& clobber(x0, x1, x2)
- -> (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
(MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
+ => (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
(MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVHstoreidx ptr idx w mem)
+ => (MOVHstoreidx ptr idx w mem)
(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem))
&& x.Uses == 1
&& clobber(x)
- -> (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem))
&& x.Uses == 1
&& s == nil
&& (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
&& clobber(x)
- -> (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
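// Aside (illustrative sketch, not part of this CL): the store-combining
// rules above fire on byte-by-byte writes of a wider value. Little-endian
// order collapses into one plain wide store; the reversed order is what
// the REV/REV16W variants handle.

package main

import "fmt"

// put32 writes w little-endian, one byte at a time. On arm64 the rules
// above can merge the four MOVBstores into a single unaligned MOVWstore.
func put32(b []byte, w uint32) {
	_ = b[3] // one bounds check, so the stores stay adjacent
	b[0] = byte(w)
	b[1] = byte(w >> 8)
	b[2] = byte(w >> 16)
	b[3] = byte(w >> 24)
}

func main() {
	buf := make([]byte, 4)
	put32(buf, 0x11223344)
	fmt.Printf("% x\n", buf) // 44 33 22 11
}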
// FP simplification; see the fused-multiply sketch after these rules.
-(FNEGS (FMULS x y)) -> (FNMULS x y)
-(FNEGD (FMULD x y)) -> (FNMULD x y)
-(FMULS (FNEGS x) y) -> (FNMULS x y)
-(FMULD (FNEGD x) y) -> (FNMULD x y)
-(FNEGS (FNMULS x y)) -> (FMULS x y)
-(FNEGD (FNMULD x y)) -> (FMULD x y)
-(FNMULS (FNEGS x) y) -> (FMULS x y)
-(FNMULD (FNEGD x) y) -> (FMULD x y)
-(FADDS a (FMULS x y)) -> (FMADDS a x y)
-(FADDD a (FMULD x y)) -> (FMADDD a x y)
-(FSUBS a (FMULS x y)) -> (FMSUBS a x y)
-(FSUBD a (FMULD x y)) -> (FMSUBD a x y)
-(FSUBS (FMULS x y) a) -> (FNMSUBS a x y)
-(FSUBD (FMULD x y) a) -> (FNMSUBD a x y)
-(FADDS a (FNMULS x y)) -> (FMSUBS a x y)
-(FADDD a (FNMULD x y)) -> (FMSUBD a x y)
-(FSUBS a (FNMULS x y)) -> (FMADDS a x y)
-(FSUBD a (FNMULD x y)) -> (FMADDD a x y)
-(FSUBS (FNMULS x y) a) -> (FNMADDS a x y)
-(FSUBD (FNMULD x y) a) -> (FNMADDD a x y)
-
-(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read8(sym, off))])
-(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
-(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read32(sym, off, config.ctxt.Arch.ByteOrder))])
-(MOVDload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVDconst [int64(read64(sym, off, config.ctxt.Arch.ByteOrder))])
+(FNEGS (FMULS x y)) => (FNMULS x y)
+(FNEGD (FMULD x y)) => (FNMULD x y)
+(FMULS (FNEGS x) y) => (FNMULS x y)
+(FMULD (FNEGD x) y) => (FNMULD x y)
+(FNEGS (FNMULS x y)) => (FMULS x y)
+(FNEGD (FNMULD x y)) => (FMULD x y)
+(FNMULS (FNEGS x) y) => (FMULS x y)
+(FNMULD (FNEGD x) y) => (FMULD x y)
+(FADDS a (FMULS x y)) => (FMADDS a x y)
+(FADDD a (FMULD x y)) => (FMADDD a x y)
+(FSUBS a (FMULS x y)) => (FMSUBS a x y)
+(FSUBD a (FMULD x y)) => (FMSUBD a x y)
+(FSUBS (FMULS x y) a) => (FNMSUBS a x y)
+(FSUBD (FMULD x y) a) => (FNMSUBD a x y)
+(FADDS a (FNMULS x y)) => (FMSUBS a x y)
+(FADDD a (FNMULD x y)) => (FMSUBD a x y)
+(FSUBS a (FNMULS x y)) => (FMADDS a x y)
+(FSUBD a (FNMULD x y)) => (FMADDD a x y)
+(FSUBS (FNMULS x y) a) => (FNMADDS a x y)
+(FSUBD (FNMULD x y) a) => (FNMADDD a x y)
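// Aside (illustrative sketch, not part of this CL): Go permits fusing
// x*y±a into one operation (the spec allows it, it is not guaranteed),
// so the FP rules above turn these shapes into single FMADDD/FMSUBD/
// FNMULD-family instructions on arm64.

package main

import "fmt"

func mulAdd(a, x, y float64) float64 { return a + x*y }  // FMADDD a, x, y
func mulSub(a, x, y float64) float64 { return a - x*y }  // FMSUBD a, x, y
func negMul(x, y float64) float64    { return -(x * y) } // FNMULD x, y

func main() {
	fmt.Println(mulAdd(1, 2, 3), mulSub(1, 2, 3), negMul(2, 3)) // 7 -5 -6
}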
+
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read8(sym, int64(off)))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVDload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
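// Aside (illustrative sketch, not part of this CL): the symIsRO rules
// above fold loads from read-only symbols into constants. A small
// constant string is such a symbol, so materializing its bytes can
// avoid touching memory at run time.

package main

import "fmt"

var sink []byte

func main() {
	// The bytes of "12" live in read-only data; the halfword load that
	// copies them is eligible to fold into a MOVDconst by the rules above.
	sink = []byte("12")
	fmt.Println(sink) // [49 50]
}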
if x1.Op != OpARM64SLLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ADDshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
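// Aside (sketch modeled on cmd/compile/internal/ssa/rewrite.go; the
// bodies below are assumptions, not text from this CL): AuxInt is a raw
// int64 field, and the rewritten code above funnels every access through
// small typed adapters so each use site states the type it expects.
// Most are identity or width conversions; analogous adapters cover Aux
// values such as symbols and condition codes (auxToSym, auxToCCop).

package sketch

func auxIntToInt64(i int64) int64 { return i }
func int64ToAuxInt(i int64) int64 { return i }

func auxIntToInt32(i int64) int32 { return int32(i) }
func int32ToAuxInt(i int32) int64 { return int64(i) }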
if x1.Op != OpARM64SRLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ADDshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRAconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ADDshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 63 {
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 63 {
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SLL {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 31 {
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 31 {
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SLL {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
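// Aside (illustrative sketch, not part of this CL): the matchers above
// recognize the branch-free variable-rotate idiom and collapse the whole
// SLL/SRL/CSEL0 tree into a single ROR (or RORW for the 32-bit form).

package main

// rotr64 is the manual idiom. When k&63 == 0 the left shift count is 64,
// which yields 0 in Go, so the OR still returns x, as a rotate should.
func rotr64(x uint64, k uint) uint64 {
	return x>>(k&63) | x<<(64-k&63)
}

func main() {
	println(rotr64(1, 4) == 1<<60) // true
}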
// match: (ADDshiftLL (MOVDconst [c]) x [d])
// result: (ADDconst [c] (SLLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ADDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (ADDshiftLL x (MOVDconst [c]) [d])
// result: (ADDconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ADDconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
// match: (ADDshiftLL [c] (SRLconst x [64-c]) x)
// result: (RORconst [64-c] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
break
}
v.reset(OpARM64RORconst)
- v.AuxInt = 64 - c
+ v.AuxInt = int64ToAuxInt(64 - c)
v.AddArg(x)
return true
}
// result: (RORWconst [32-c] x)
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64RORWconst)
- v.AuxInt = 32 - c
+ v.AuxInt = int64ToAuxInt(32 - c)
v.AddArg(x)
return true
}
// match: (ADDshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
// result: (REV16W x)
for {
- if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
break
}
x := v_0.Args[0]
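// Aside (illustrative sketch, not part of this CL): the shape matched
// above is a 16-bit byte swap, the same value computed by x>>8 | x<<8
// (and by math/bits.ReverseBytes16); parallel rules elsewhere in the
// file handle the OR and XOR spellings.

package main

import "fmt"

func swap16(x uint16) uint16 {
	return x>>8 | x<<8 // lowers to a single REV16W on arm64
}

func main() {
	fmt.Printf("%#04x\n", swap16(0x1234)) // 0x3412
}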
// match: (ADDshiftLL [c] (SRLconst x [64-c]) x2)
// result: (EXTRconst [64-c] x2 x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
x2 := v_1
v.reset(OpARM64EXTRconst)
- v.AuxInt = 64 - c
+ v.AuxInt = int64ToAuxInt(64 - c)
v.AddArg2(x2, x)
return true
}
// result: (EXTRWconst [32-c] x2 x)
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
x2 := v_1
if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64EXTRWconst)
- v.AuxInt = 32 - c
+ v.AuxInt = int64ToAuxInt(32 - c)
v.AddArg2(x2, x)
return true
}
// match: (ADDshiftRA (MOVDconst [c]) x [d])
// result: (ADDconst [c] (SRAconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ADDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (ADDshiftRA x (MOVDconst [c]) [d])
// result: (ADDconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ADDconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
// match: (ADDshiftRL (MOVDconst [c]) x [d])
// result: (ADDconst [c] (SRLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ADDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (ADDshiftRL x (MOVDconst [c]) [d])
// result: (ADDconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ADDconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
// match: (ADDshiftRL [c] (SLLconst x [64-c]) x)
	// result: (RORconst [c] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
break
}
v.reset(OpARM64RORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// result: (RORWconst [c] x)
for {
t := v.Type
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 32-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
break
}
x := v_0.Args[0]
break
}
v.reset(OpARM64RORWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
if x1.Op != OpARM64SLLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ANDshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ANDshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRAconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ANDshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
// cond: isARM64BFMask(sc, ac, sc)
// result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
for {
- ac := v.AuxInt
+ ac := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, ac, sc)) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
v.AddArg(x)
return true
}
// cond: isARM64BFMask(sc, ac, 0)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
for {
- ac := v.AuxInt
+ ac := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SRLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, ac, 0)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, 0))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
v.AddArg(x)
return true
}
// match: (ANDshiftLL (MOVDconst [c]) x [d])
// result: (ANDconst [c] (SLLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ANDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (ANDshiftLL x (MOVDconst [c]) [d])
// result: (ANDconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ANDconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: y
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
if y.Op != OpARM64SLLconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if x != y.Args[0] || !(c == d) {
break
}
// match: (ANDshiftRA (MOVDconst [c]) x [d])
// result: (ANDconst [c] (SRAconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ANDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (ANDshiftRA x (MOVDconst [c]) [d])
// result: (ANDconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ANDconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
// cond: c==d
// result: y
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
if y.Op != OpARM64SRAconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if x != y.Args[0] || !(c == d) {
break
}
// match: (ANDshiftRL (MOVDconst [c]) x [d])
// result: (ANDconst [c] (SRLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ANDconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (ANDshiftRL x (MOVDconst [c]) [d])
// result: (ANDconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ANDconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: y
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
if y.Op != OpARM64SRLconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if x != y.Args[0] || !(c == d) {
break
}
if x1.Op != OpARM64SLLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64BICshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64BICshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRAconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64BICshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
// match: (BICshiftLL x (MOVDconst [c]) [d])
// result: (ANDconst x [^int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ANDconst)
- v.AuxInt = ^int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SLLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
// match: (BICshiftRA x (MOVDconst [c]) [d])
// result: (ANDconst x [^(c>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ANDconst)
- v.AuxInt = ^(c >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRAconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
// match: (BICshiftRL x (MOVDconst [c]) [d])
// result: (ANDconst x [^int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ANDconst)
- v.AuxInt = ^int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
if x1.Op != OpARM64SLLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64CMNshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64CMNshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRAconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64CMNshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
// match: (CMNshiftLL (MOVDconst [c]) x [d])
// result: (CMNconst [c] (SLLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64CMNconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (CMNshiftLL x (MOVDconst [c]) [d])
// result: (CMNconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64CMNconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
// match: (CMNshiftRA (MOVDconst [c]) x [d])
// result: (CMNconst [c] (SRAconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64CMNconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (CMNshiftRA x (MOVDconst [c]) [d])
// result: (CMNconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64CMNconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
// match: (CMNshiftRL (MOVDconst [c]) x [d])
// result: (CMNconst [c] (SRLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64CMNconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (CMNshiftRL x (MOVDconst [c]) [d])
// result: (CMNconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64CMNconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
if x1.Op != OpARM64SLLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64CMPshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x0.Op != OpARM64SLLconst {
break
}
- c := x0.AuxInt
+ c := auxIntToInt64(x0.AuxInt)
y := x0.Args[0]
x1 := v_1
if !(clobberIfDead(x0)) {
}
v.reset(OpARM64InvertFlags)
v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg2(x1, y)
v.AddArg(v0)
return true
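// Aside (illustrative sketch, not part of this CL): CMPshiftLL can only
// shift its second operand, so when the shifted value appears on the
// left the rewrite above swaps the operands and wraps the comparison in
// InvertFlags, which tells flag consumers to use the swapped condition
// (e.g. a later LessThan reads as GreaterThan).

package main

// shiftedLess compares with the shifted operand on the left, the case
// the InvertFlags path above exists for.
func shiftedLess(x, y int64) bool {
	return (y << 3) < x
}

func main() {
	println(shiftedLess(100, 2)) // true: 16 < 100
}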
if x1.Op != OpARM64SRLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64CMPshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x0.Op != OpARM64SRLconst {
break
}
- c := x0.AuxInt
+ c := auxIntToInt64(x0.AuxInt)
y := x0.Args[0]
x1 := v_1
if !(clobberIfDead(x0)) {
}
v.reset(OpARM64InvertFlags)
v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg2(x1, y)
v.AddArg(v0)
return true
if x1.Op != OpARM64SRAconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64CMPshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x0.Op != OpARM64SRAconst {
break
}
- c := x0.AuxInt
+ c := auxIntToInt64(x0.AuxInt)
y := x0.Args[0]
x1 := v_1
if !(clobberIfDead(x0)) {
}
v.reset(OpARM64InvertFlags)
v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v0.AddArg2(x1, y)
v.AddArg(v0)
return true
// match: (CMPshiftLL (MOVDconst [c]) x [d])
// result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64InvertFlags)
v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v1.AuxInt = d
+ v1.AuxInt = int64ToAuxInt(d)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
// match: (CMPshiftLL x (MOVDconst [c]) [d])
// result: (CMPconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64CMPconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
// match: (CMPshiftRA (MOVDconst [c]) x [d])
// result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64InvertFlags)
v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v1 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v1.AuxInt = d
+ v1.AuxInt = int64ToAuxInt(d)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
// match: (CMPshiftRA x (MOVDconst [c]) [d])
// result: (CMPconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64CMPconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
// match: (CMPshiftRL (MOVDconst [c]) x [d])
// result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64InvertFlags)
v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
- v0.AuxInt = c
+ v0.AuxInt = int64ToAuxInt(c)
v1 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v1.AuxInt = d
+ v1.AuxInt = int64ToAuxInt(d)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
// match: (CMPshiftRL x (MOVDconst [c]) [d])
// result: (CMPconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64CMPconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
if x1.Op != OpARM64SLLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64EONshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64EONshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRAconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64EONshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
// match: (EONshiftLL x (MOVDconst [c]) [d])
// result: (XORconst x [^int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64XORconst)
- v.AuxInt = ^int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [-1])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SLLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
// match: (EONshiftRA x (MOVDconst [c]) [d])
// result: (XORconst x [^(c>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64XORconst)
- v.AuxInt = ^(c >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [-1])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRAconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
// match: (EONshiftRL x (MOVDconst [c]) [d])
// result: (XORconst x [^int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64XORconst)
- v.AuxInt = ^int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [-1])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
}
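// Aside (illustrative sketch, not part of this CL): the constant folds
// above rest on identities over a repeated operand: x &^ x == 0 (BIC)
// and x ^ ^x == all ones (EON), so an op whose two inputs are the same
// shifted value reduces to MOVDconst [0] or MOVDconst [-1].

package main

import "fmt"

func identities(x uint64) (bic, eon uint64) {
	s := x << 7
	return s &^ s, s ^ ^s // always 0 and ^uint64(0)
}

func main() {
	fmt.Println(identities(42)) // 0 18446744073709551615
}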
// match: (MOVBUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVDconst [int64(read8(sym, off))])
+ // result: (MOVDconst [int64(read8(sym, int64(off)))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = int64(read8(sym, off))
+ v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off))))
return true
}
return false
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<8-1, sc)) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
v.AddArg(x)
return true
}
if v_0.Op != OpARM64SRLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<8-1, 0)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, 8)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
v.AddArg(x)
return true
}
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc < 8) {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = armBFAuxInt(lc, 8-lc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc))
v.AddArg(x)
return true
}
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
continue
}
w := v_1.Args[0]
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 8) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 8) {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 8) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 8) {
continue
}
w := v_1.Args[0]
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 24) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 24) {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(8, 24) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 24) {
continue
}
w := v_1.Args[0]
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
break
}
v_1_0 := v_1.Args[0]
}
w := v_1_0.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 8 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
continue
}
v_1_0 := v_1.Args[0]
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
ptr1 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w0, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w0 mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
if v_1.Op != OpARM64SRLconst {
continue
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpARM64MOVBstoreidx {
ptr1 := x.Args[0]
idx1 := x.Args[1]
w0 := x.Args[2]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
continue
}
v.reset(OpARM64MOVHstoreidx)
break
}
// match: (MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem))
- // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x)
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8 && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
if v_1.Op != OpARM64UBFX {
break
}
- bfc := v_1.AuxInt
+ bfc := auxIntToArm64BitField(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
if w0.Op != OpARM64UBFX {
break
}
- bfc2 := w0.AuxInt
- if w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc)-8 && clobber(x)) {
+ bfc2 := auxIntToArm64BitField(w0.AuxInt)
+ if w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && bfc.getARM64BFwidth() == 32-bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32-bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb()-8 && clobber(x)) {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w0, mem)
return true
}
// match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem))
- // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x)
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8 && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w0 mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
if v_1.Op != OpARM64UBFX {
continue
}
- bfc := v_1.AuxInt
+ bfc := auxIntToArm64BitField(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpARM64MOVBstoreidx {
if w0.Op != OpARM64UBFX {
continue
}
- bfc2 := w0.AuxInt
- if w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc)-8 && clobber(x)) {
+ bfc2 := auxIntToArm64BitField(w0.AuxInt)
+ if w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && bfc.getARM64BFwidth() == 32-bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32-bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb()-8 && clobber(x)) {
continue
}
v.reset(OpARM64MOVHstoreidx)
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVHstore [i-1] {s} ptr0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64MOVDreg {
break
}
w := v_1_0.Args[0]
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
ptr1 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 {
break
}
w0_0 := w0.Args[0]
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w0, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr1 idx1 w0 mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
if v_1.Op != OpARM64SRLconst {
continue
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64MOVDreg {
continue
ptr1 := x.Args[0]
idx1 := x.Args[1]
w0 := x.Args[2]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-8 {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 {
continue
}
w0_0 := w0.Args[0]
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
// result: (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
break
}
x2 := x1.Args[2]
- if x2.Op != OpARM64MOVBstore || x2.AuxInt != i-3 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 || w != x2_1.Args[0] {
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
break
}
x3 := x2.Args[2]
- if x3.Op != OpARM64MOVBstore || x3.AuxInt != i-4 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[2]
break
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64SRLconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] {
+ if x3_1.Op != OpARM64SRLconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
break
}
x4 := x3.Args[2]
- if x4.Op != OpARM64MOVBstore || x4.AuxInt != i-5 || x4.Aux != s {
+ if x4.Op != OpARM64MOVBstore || auxIntToInt32(x4.AuxInt) != i-5 || auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[2]
break
}
x4_1 := x4.Args[1]
- if x4_1.Op != OpARM64SRLconst || x4_1.AuxInt != 40 || w != x4_1.Args[0] {
+ if x4_1.Op != OpARM64SRLconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
break
}
x5 := x4.Args[2]
- if x5.Op != OpARM64MOVBstore || x5.AuxInt != i-6 || x5.Aux != s {
+ if x5.Op != OpARM64MOVBstore || auxIntToInt32(x5.AuxInt) != i-6 || auxToSym(x5.Aux) != s {
break
}
_ = x5.Args[2]
break
}
x5_1 := x5.Args[1]
- if x5_1.Op != OpARM64SRLconst || x5_1.AuxInt != 48 || w != x5_1.Args[0] {
+ if x5_1.Op != OpARM64SRLconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
break
}
x6 := x5.Args[2]
- if x6.Op != OpARM64MOVBstore || x6.AuxInt != i-7 || x6.Aux != s {
+ if x6.Op != OpARM64MOVBstore || auxIntToInt32(x6.AuxInt) != i-7 || auxToSym(x6.Aux) != s {
break
}
mem := x6.Args[2]
break
}
x6_1 := x6.Args[1]
- if x6_1.Op != OpARM64SRLconst || x6_1.AuxInt != 56 || w != x6_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ if x6_1.Op != OpARM64SRLconst || auxIntToInt64(x6_1.AuxInt) != 56 || w != x6_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
break
}
v.reset(OpARM64MOVDstore)
- v.AuxInt = i - 7
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 7)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x6.Pos, OpARM64REV, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
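// The rule above matches eight adjacent one-byte stores of w, w>>8, ...,
// w>>56 at offsets i, i-1, ..., i-7 and replaces them with one byte-reversed
// 8-byte store: REV flips w so a single MOVDstore at i-7 writes the same
// big-endian byte pattern.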
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)
// result: (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
for {
- if v.AuxInt != 7 {
+ if auxIntToInt32(v.AuxInt) != 7 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != 6 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 6 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != 5 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
break
}
x2 := x1.Args[2]
- if x2.Op != OpARM64MOVBstore || x2.AuxInt != 4 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != 4 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[2]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 || w != x2_1.Args[0] {
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
break
}
x3 := x2.Args[2]
- if x3.Op != OpARM64MOVBstore || x3.AuxInt != 3 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBstore || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[2]
break
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64SRLconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] {
+ if x3_1.Op != OpARM64SRLconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
break
}
x4 := x3.Args[2]
- if x4.Op != OpARM64MOVBstore || x4.AuxInt != 2 || x4.Aux != s {
+ if x4.Op != OpARM64MOVBstore || auxIntToInt32(x4.AuxInt) != 2 || auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[2]
break
}
x4_1 := x4.Args[1]
- if x4_1.Op != OpARM64SRLconst || x4_1.AuxInt != 40 || w != x4_1.Args[0] {
+ if x4_1.Op != OpARM64SRLconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
break
}
x5 := x4.Args[2]
- if x5.Op != OpARM64MOVBstore || x5.AuxInt != 1 || x5.Aux != s {
+ if x5.Op != OpARM64MOVBstore || auxIntToInt32(x5.AuxInt) != 1 || auxToSym(x5.Aux) != s {
break
}
_ = x5.Args[2]
ptr1 := p1_0
idx1 := p1_1
x5_1 := x5.Args[1]
- if x5_1.Op != OpARM64SRLconst || x5_1.AuxInt != 48 || w != x5_1.Args[0] {
+ if x5_1.Op != OpARM64SRLconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
continue
}
x6 := x5.Args[2]
ptr0 := x6.Args[0]
idx0 := x6.Args[1]
x6_2 := x6.Args[2]
- if x6_2.Op != OpARM64SRLconst || x6_2.AuxInt != 56 || w != x6_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ if x6_2.Op != OpARM64SRLconst || auxIntToInt64(x6_2.AuxInt) != 56 || w != x6_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
continue
}
v.reset(OpARM64MOVDstoreidx)
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
// result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64UBFX || x0_1.AuxInt != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
+ if x0_1.Op != OpARM64UBFX || auxIntToArm64BitField(x0_1.AuxInt) != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64UBFX || x1_1.AuxInt != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
+ if x1_1.Op != OpARM64UBFX || auxIntToArm64BitField(x1_1.AuxInt) != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
break
}
x2 := x1.Args[2]
- if x2.Op != OpARM64MOVBstore || x2.AuxInt != i-3 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
break
}
mem := x2.Args[2]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64UBFX || x2_1.AuxInt != armBFAuxInt(24, 8) || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ if x2_1.Op != OpARM64UBFX || auxIntToArm64BitField(x2_1.AuxInt) != armBFAuxInt(24, 8) || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 3
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
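// Same idea at width 4: four byte stores of successive UBFX slices of w
// collapse into one REVW (reverse bytes in 32-bit word) feeding a single
// MOVWstore at the lowest offset.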
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
// result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
for {
- if v.AuxInt != 3 {
+ if auxIntToInt32(v.AuxInt) != 3 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64UBFX || x0_1.AuxInt != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
+ if x0_1.Op != OpARM64UBFX || auxIntToArm64BitField(x0_1.AuxInt) != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != 1 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
ptr1 := p1_0
idx1 := p1_1
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64UBFX || x1_1.AuxInt != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
+ if x1_1.Op != OpARM64UBFX || auxIntToArm64BitField(x1_1.AuxInt) != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
continue
}
x2 := x1.Args[2]
ptr0 := x2.Args[0]
idx0 := x2.Args[1]
x2_2 := x2.Args[2]
- if x2_2.Op != OpARM64UBFX || x2_2.AuxInt != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
+ if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
continue
}
v.reset(OpARM64MOVWstoreidx)
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
// result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 {
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 {
break
}
x0_1_0 := x0_1.Args[0]
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 {
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 {
break
}
x1_1_0 := x1_1.Args[0]
break
}
x2 := x1.Args[2]
- if x2.Op != OpARM64MOVBstore || x2.AuxInt != i-3 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
break
}
mem := x2.Args[2]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 {
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 {
break
}
x2_1_0 := x2_1.Args[0]
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 3
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
// result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
for {
- if v.AuxInt != 3 {
+ if auxIntToInt32(v.AuxInt) != 3 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 {
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 {
break
}
x0_1_0 := x0_1.Args[0]
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != 1 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
ptr1 := p1_0
idx1 := p1_1
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 {
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 {
continue
}
x1_1_0 := x1_1.Args[0]
ptr0 := x2.Args[0]
idx0 := x2.Args[1]
x2_2 := x2.Args[2]
- if x2_2.Op != OpARM64SRLconst || x2_2.AuxInt != 24 {
+ if x2_2.Op != OpARM64SRLconst || auxIntToInt64(x2_2.AuxInt) != 24 {
continue
}
x2_2_0 := x2_2.Args[0]
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
// result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
break
}
x2 := x1.Args[2]
- if x2.Op != OpARM64MOVBstore || x2.AuxInt != i-3 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
break
}
mem := x2.Args[2]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64SRLconst || x2_1.AuxInt != 24 || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 3
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
// result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
for {
- if v.AuxInt != 3 {
+ if auxIntToInt32(v.AuxInt) != 3 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
p := v_0
w := v_1
x0 := v_2
- if x0.Op != OpARM64MOVBstore || x0.AuxInt != 2 || x0.Aux != s {
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
break
}
_ = x0.Args[2]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64SRLconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] {
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
break
}
x1 := x0.Args[2]
- if x1.Op != OpARM64MOVBstore || x1.AuxInt != 1 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[2]
ptr1 := p1_0
idx1 := p1_1
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64SRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
continue
}
x2 := x1.Args[2]
ptr0 := x2.Args[0]
idx0 := x2.Args[1]
x2_2 := x2.Args[2]
- if x2_2.Op != OpARM64SRLconst || x2_2.AuxInt != 24 || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
+ if x2_2.Op != OpARM64SRLconst || auxIntToInt64(x2_2.AuxInt) != 24 || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
continue
}
v.reset(OpARM64MOVWstoreidx)
// cond: x.Uses == 1 && clobber(x)
// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
x_1 := x.Args[1]
- if x_1.Op != OpARM64SRLconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_1.Op != OpARM64SRLconst || auxIntToInt64(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
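// And at width 2: storing w at i and w>>8 at i-1 is a big-endian halfword,
// so it becomes one MOVHstore of REV16W(w), the byte-swapped low halfword,
// at i-1.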
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
ptr0 := x.Args[0]
idx0 := x.Args[1]
x_2 := x.Args[2]
- if x_2.Op != OpARM64SRLconst || x_2.AuxInt != 8 || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ if x_2.Op != OpARM64SRLconst || auxIntToInt64(x_2.AuxInt) != 8 || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
continue
}
v.reset(OpARM64MOVHstoreidx)
// cond: x.Uses == 1 && clobber(x)
// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
x_1 := x.Args[1]
- if x_1.Op != OpARM64UBFX || x_1.AuxInt != armBFAuxInt(8, 8) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_1.Op != OpARM64UBFX || auxIntToArm64BitField(x_1.AuxInt) != armBFAuxInt(8, 8) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
ptr0 := x.Args[0]
idx0 := x.Args[1]
x_2 := x.Args[2]
- if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
continue
}
v.reset(OpARM64MOVHstoreidx)
// cond: x.Uses == 1 && clobber(x)
// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
x_1 := x.Args[1]
- if x_1.Op != OpARM64SRLconst || x_1.AuxInt != 8 {
+ if x_1.Op != OpARM64SRLconst || auxIntToInt64(x_1.AuxInt) != 8 {
break
}
x_1_0 := x_1.Args[0]
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
ptr0 := x.Args[0]
idx0 := x.Args[1]
x_2 := x.Args[2]
- if x_2.Op != OpARM64SRLconst || x_2.AuxInt != 8 {
+ if x_2.Op != OpARM64SRLconst || auxIntToInt64(x_2.AuxInt) != 8 {
continue
}
x_2_0 := x_2.Args[0]
// cond: x.Uses == 1 && clobber(x)
// result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr := v_0
w := v_1
x := v_2
- if x.Op != OpARM64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
x_1 := x.Args[1]
- if x_1.Op != OpARM64UBFX || x_1.AuxInt != armBFAuxInt(8, 24) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_1.Op != OpARM64UBFX || auxIntToArm64BitField(x_1.AuxInt) != armBFAuxInt(8, 24) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpARM64MOVHstore)
- v.AuxInt = i - 1
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
v0.AddArg(w)
v.AddArg3(ptr, v0, mem)
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
ptr0 := x.Args[0]
idx0 := x.Args[1]
x_2 := x.Args[2]
- if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 24) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 24) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
continue
}
v.reset(OpARM64MOVHstoreidx)
// result: (MOVHstoreidx ptr idx w mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 1 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
break
}
idx := v_1.Args[0]
- if v_2.Op != OpARM64SRLconst || v_2.AuxInt != 8 {
+ if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 8 {
break
}
w := v_2.Args[0]
// result: (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 3 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 3 {
break
}
idx := v_1.Args[0]
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 2 || idx != x0_1.Args[0] {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 2 || idx != x0_1.Args[0] {
break
}
x0_2 := x0.Args[2]
- if x0_2.Op != OpARM64UBFX || x0_2.AuxInt != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
+ if x0_2.Op != OpARM64UBFX || auxIntToArm64BitField(x0_2.AuxInt) != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
break
}
x1 := x0.Args[3]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] {
break
}
x1_2 := x1.Args[2]
- if x1_2.Op != OpARM64UBFX || x1_2.AuxInt != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
+ if x1_2.Op != OpARM64UBFX || auxIntToArm64BitField(x1_2.AuxInt) != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
break
}
x2 := x1.Args[3]
break
}
x2_2 := x2.Args[2]
- if x2_2.Op != OpARM64UBFX || x2_2.AuxInt != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
break
}
v.reset(OpARM64MOVWstoreidx)
break
}
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 1 || idx != x0_1.Args[0] {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 1 || idx != x0_1.Args[0] {
break
}
x0_2 := x0.Args[2]
- if x0_2.Op != OpARM64UBFX || x0_2.AuxInt != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
+ if x0_2.Op != OpARM64UBFX || auxIntToArm64BitField(x0_2.AuxInt) != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
break
}
x1 := x0.Args[3]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] {
break
}
x1_2 := x1.Args[2]
- if x1_2.Op != OpARM64UBFX || x1_2.AuxInt != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
+ if x1_2.Op != OpARM64UBFX || auxIntToArm64BitField(x1_2.AuxInt) != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
break
}
x2 := x1.Args[3]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 3 || idx != x2_1.Args[0] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 3 || idx != x2_1.Args[0] {
break
}
x2_2 := x2.Args[2]
- if x2_2.Op != OpARM64UBFX || x2_2.AuxInt != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
break
}
v.reset(OpARM64MOVWstoreidx)
// result: (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 1 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
break
}
idx := v_1.Args[0]
break
}
x_2 := x.Args[2]
- if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpARM64MOVHstoreidx)
break
}
x_1 := x.Args[1]
- if x_1.Op != OpARM64ADDconst || x_1.AuxInt != 1 || idx != x_1.Args[0] {
+ if x_1.Op != OpARM64ADDconst || auxIntToInt64(x_1.AuxInt) != 1 || idx != x_1.Args[0] {
break
}
x_2 := x.Args[2]
- if x_2.Op != OpARM64UBFX || x_2.AuxInt != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
break
}
v.reset(OpARM64MOVHstoreidx)
return true
}
// match: (MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem))
- // cond: x.Uses == 1 && areAdjacentOffsets(i,j,1) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x)
- // result: (MOVHstorezero [min(i,j)] {s} ptr0 mem)
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),1) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
x := v_1
if x.Op != OpARM64MOVBstorezero {
break
}
- j := x.AuxInt
- if x.Aux != s {
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
ptr1 := x.Args[0]
- if !(x.Uses == 1 && areAdjacentOffsets(i, j, 1) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 1) && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVHstorezero)
- v.AuxInt = min(i, j)
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
v.AddArg2(ptr0, mem)
return true
}
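// Offsets are now int32, so min(i, j) necessarily fits in 32 bits and the
// old is32Bit guard is dropped; the int64 casts only adapt i and j to the
// int64-based helpers. Their assumed shape:
//
//	// adjacent if one store of the given size ends where the other begins
//	func areAdjacentOffsets(off1, off2, size int64) bool {
//		return off1+size == off2 || off1 == off2+size
//	}
//
//	func min(x, y int64) int64 {
//		if x < y {
//			return x
//		}
//		return y
//	}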
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVHstorezeroidx ptr1 idx1 mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt32(v.AuxInt) != 1 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
// result: (MOVHstorezeroidx ptr idx mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 1 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
break
}
idx := v_1.Args[0]
}
// match: (MOVDload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVDconst [int64(read64(sym, off, config.ctxt.Arch.ByteOrder))])
+ // result: (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = int64(read64(sym, off, config.ctxt.Arch.ByteOrder))
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
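// Loads through SB from a symbol the linker marks read-only fold to
// constants. A simplified, hypothetical sketch of the decoding helper (the
// real read64 in rewrite.go also handles symbol data shorter than 8 bytes
// more carefully):
//
//	func read64(sym interface{}, off int64, bo binary.ByteOrder) uint64 {
//		lsym := sym.(*obj.LSym)
//		buf := make([]byte, 8)
//		if off < int64(len(lsym.P)) {
//			copy(buf, lsym.P[off:])
//		}
//		return bo.Uint64(buf)
//	}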
return false
return true
}
// match: (MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem))
- // cond: x.Uses == 1 && areAdjacentOffsets(i,j,8) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x)
- // result: (MOVQstorezero [min(i,j)] {s} ptr0 mem)
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),8) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVQstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
x := v_1
if x.Op != OpARM64MOVDstorezero {
break
}
- j := x.AuxInt
- if x.Aux != s {
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
ptr1 := x.Args[0]
- if !(x.Uses == 1 && areAdjacentOffsets(i, j, 8) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 8) && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVQstorezero)
- v.AuxInt = min(i, j)
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
v.AddArg2(ptr0, mem)
return true
}
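// MOVQstorezero widens two adjacent 8-byte zero stores to one 16-byte zero
// store, which should assemble to a single stp xzr, xzr.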
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVQstorezero [0] {s} p0 mem)
for {
- if v.AuxInt != 8 {
+ if auxIntToInt32(v.AuxInt) != 8 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
p0 := v_0
if p0.Op != OpARM64ADD {
break
continue
}
v.reset(OpARM64MOVQstorezero)
- v.AuxInt = 0
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(s)
v.AddArg2(p0, mem)
return true
}
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVQstorezero [0] {s} p0 mem)
for {
- if v.AuxInt != 8 {
+ if auxIntToInt32(v.AuxInt) != 8 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
p0 := v_0
- if p0.Op != OpARM64ADDshiftLL || p0.AuxInt != 3 {
+ if p0.Op != OpARM64ADDshiftLL || auxIntToInt64(p0.AuxInt) != 3 {
break
}
idx0 := p0.Args[1]
break
}
v.reset(OpARM64MOVQstorezero)
- v.AuxInt = 0
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(s)
v.AddArg2(p0, mem)
return true
}
}
// match: (MOVHUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVDconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
+ // result: (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = int64(read16(sym, off, config.ctxt.Arch.ByteOrder))
+ v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<16-1, sc)) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
v.AddArg(x)
return true
}
if v_0.Op != OpARM64SRLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<16-1, 0)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, 16)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
v.AddArg(x)
return true
}
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc < 16) {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = armBFAuxInt(lc, 16-lc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc))
v.AddArg(x)
return true
}
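// These three rules (their outer MOVHUreg/MOVHreg match is elided in this
// excerpt) turn shifts of a 16-bit extension into bitfield ops: a left
// shift whose mask still fits becomes UBFIZ (unsigned bitfield insert in
// zero), a right shift with a 16-bit result becomes UBFX (unsigned
// extract), and a left shift of a sign-extended halfword by lc < 16 becomes
// SBFIZ, placing the low 16-lc bits at bit lc with sign bits above.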
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVWstore [i-2] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s {
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVWstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
continue
}
w := v_1.Args[0]
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
break
}
idx0 := v_0.Args[1]
ptr0 := v_0.Args[0]
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
break
}
w := v_1.Args[0]
}
v.reset(OpARM64MOVWstoreidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v0.AddArg(idx1)
v.AddArg4(ptr1, v0, w, mem)
return true
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVWstore [i-2] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s {
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVWstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
continue
}
w := v_1.Args[0]
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
break
}
idx0 := v_0.Args[1]
ptr0 := v_0.Args[0]
- if v_1.Op != OpARM64UBFX || v_1.AuxInt != armBFAuxInt(16, 16) {
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
break
}
w := v_1.Args[0]
}
v.reset(OpARM64MOVWstoreidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v0.AddArg(idx1)
v.AddArg4(ptr1, v0, w, mem)
return true
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVWstore [i-2] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
break
}
v_1_0 := v_1.Args[0]
}
w := v_1_0.Args[0]
x := v_2
- if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s {
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVWstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
continue
}
v_1_0 := v_1.Args[0]
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
break
}
idx0 := v_0.Args[1]
ptr0 := v_0.Args[0]
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 16 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
break
}
v_1_0 := v_1.Args[0]
}
v.reset(OpARM64MOVWstoreidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v0.AddArg(idx1)
v.AddArg4(ptr1, v0, w, mem)
return true
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVWstore [i-2] {s} ptr0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVHstore || x.AuxInt != i-2 || x.Aux != s {
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
ptr1 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVWstore)
- v.AuxInt = i - 2
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w0, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVWstoreidx ptr1 idx1 w0 mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
if v_1.Op != OpARM64SRLconst {
continue
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpARM64MOVHstoreidx {
ptr1 := x.Args[0]
idx1 := x.Args[1]
w0 := x.Args[2]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
continue
}
v.reset(OpARM64MOVWstoreidx)
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w0 mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
break
}
idx0 := v_0.Args[1]
if v_1.Op != OpARM64SRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpARM64MOVHstoreidx2 {
ptr1 := x.Args[0]
idx1 := x.Args[1]
w0 := x.Args[2]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
break
}
v.reset(OpARM64MOVWstoreidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v0.AddArg(idx1)
v.AddArg4(ptr1, v0, w0, mem)
return true
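// When the narrow store was halfword-scaled (MOVHstoreidx2, address
// ptr + idx<<1), the widened word store has no scaled addressing mode at
// that element size, so the index is rebuilt explicitly as
// (SLLconst [1] idx1) and fed to the unscaled MOVWstoreidx.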
// result: (MOVWstoreidx ptr idx w mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 2 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 2 {
break
}
idx := v_1.Args[0]
- if v_2.Op != OpARM64SRLconst || v_2.AuxInt != 16 {
+ if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 16 {
break
}
w := v_2.Args[0]
return true
}
// match: (MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem))
- // cond: x.Uses == 1 && areAdjacentOffsets(i,j,2) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x)
- // result: (MOVWstorezero [min(i,j)] {s} ptr0 mem)
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),2) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVWstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
x := v_1
if x.Op != OpARM64MOVHstorezero {
break
}
- j := x.AuxInt
- if x.Aux != s {
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
ptr1 := x.Args[0]
- if !(x.Uses == 1 && areAdjacentOffsets(i, j, 2) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 2) && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVWstorezero)
- v.AuxInt = min(i, j)
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
v.AddArg2(ptr0, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVWstorezeroidx ptr1 idx1 mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVWstorezeroidx ptr1 (SLLconst <idx1.Type> [1] idx1) mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt32(v.AuxInt) != 2 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 1 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
break
}
idx0 := v_0.Args[1]
}
v.reset(OpARM64MOVWstorezeroidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 1
+ v0.AuxInt = int64ToAuxInt(1)
v0.AddArg(idx1)
v.AddArg3(ptr1, v0, mem)
return true
// result: (MOVWstorezeroidx ptr idx mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 2 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 2 {
break
}
idx := v_1.Args[0]
}
// match: (MOVWUload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVDconst [int64(read32(sym, off, config.ctxt.Arch.ByteOrder))])
+ // result: (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = int64(read32(sym, off, config.ctxt.Arch.ByteOrder))
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<32-1, sc)) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
v.AddArg(x)
return true
}
if v_0.Op != OpARM64SRLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, 1<<32-1, 0)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, 32)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
v.AddArg(x)
return true
}
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc < 32) {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = armBFAuxInt(lc, 32-lc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
v.AddArg(x)
return true
}
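// The same UBFIZ/UBFX/SBFIZ pattern repeats here at width 32 for the
// word-sized extensions.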
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVDstore [i-4] {s} ptr0 w mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
break
}
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVWstore || x.AuxInt != i-4 || x.Aux != s {
+ if x.Op != OpARM64MOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
break
}
v.reset(OpARM64MOVDstore)
- v.AuxInt = i - 4
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVDstoreidx ptr1 idx1 w mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
ptr0 := v_0_0
idx0 := v_0_1
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
continue
}
w := v_1.Args[0]
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
break
}
idx0 := v_0.Args[1]
ptr0 := v_0.Args[0]
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != 32 {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
break
}
w := v_1.Args[0]
}
v.reset(OpARM64MOVDstoreidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 2
+ v0.AuxInt = int64ToAuxInt(2)
v0.AddArg(idx1)
v.AddArg4(ptr1, v0, w, mem)
return true
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVDstore [i-4] {s} ptr0 w0 mem)
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
- if x.Op != OpARM64MOVWstore || x.AuxInt != i-4 || x.Aux != s {
+ if x.Op != OpARM64MOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
break
}
mem := x.Args[2]
ptr1 := x.Args[0]
w0 := x.Args[1]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVDstore)
- v.AuxInt = i - 4
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
v.AddArg3(ptr0, w0, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVDstoreidx ptr1 idx1 w0 mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
if v_1.Op != OpARM64SRLconst {
continue
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpARM64MOVWstoreidx {
ptr1 := x.Args[0]
idx1 := x.Args[1]
w0 := x.Args[2]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
continue
}
v.reset(OpARM64MOVDstoreidx)
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w0 mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
break
}
idx0 := v_0.Args[1]
if v_1.Op != OpARM64SRLconst {
break
}
- j := v_1.AuxInt
+ j := auxIntToInt64(v_1.AuxInt)
w := v_1.Args[0]
x := v_2
if x.Op != OpARM64MOVWstoreidx4 {
ptr1 := x.Args[0]
idx1 := x.Args[1]
w0 := x.Args[2]
- if w0.Op != OpARM64SRLconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
break
}
v.reset(OpARM64MOVDstoreidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 2
+ v0.AuxInt = int64ToAuxInt(2)
v0.AddArg(idx1)
v.AddArg4(ptr1, v0, w0, mem)
return true
// result: (MOVDstoreidx ptr idx w mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 4 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 4 {
break
}
idx := v_1.Args[0]
- if v_2.Op != OpARM64SRLconst || v_2.AuxInt != 32 {
+ if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 32 {
break
}
w := v_2.Args[0]
return true
}
// match: (MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem))
- // cond: x.Uses == 1 && areAdjacentOffsets(i,j,4) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x)
- // result: (MOVDstorezero [min(i,j)] {s} ptr0 mem)
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),4) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVDstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
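// Example: two 32-bit zero stores at offsets 0 and 4 off the same pointer
// merge into a single (MOVDstorezero [0]); taking min of the two offsets
// lets the rule fire regardless of which store is matched first.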
for {
- i := v.AuxInt
- s := v.Aux
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
ptr0 := v_0
x := v_1
if x.Op != OpARM64MOVWstorezero {
break
}
- j := x.AuxInt
- if x.Aux != s {
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
break
}
mem := x.Args[1]
ptr1 := x.Args[0]
- if !(x.Uses == 1 && areAdjacentOffsets(i, j, 4) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 4) && isSamePtr(ptr0, ptr1) && clobber(x)) {
break
}
v.reset(OpARM64MOVDstorezero)
- v.AuxInt = min(i, j)
- v.Aux = s
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
v.AddArg2(ptr0, mem)
return true
}
// cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
// result: (MOVDstorezeroidx ptr1 idx1 mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
- s := v.Aux
+ s := auxToSym(v.Aux)
if v_0.Op != OpARM64ADD {
break
}
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVDstorezeroidx ptr1 (SLLconst <idx1.Type> [2] idx1) mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt32(v.AuxInt) != 4 {
break
}
- s := v.Aux
- if v_0.Op != OpARM64ADDshiftLL || v_0.AuxInt != 2 {
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
break
}
idx0 := v_0.Args[1]
}
v.reset(OpARM64MOVDstorezeroidx)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
- v0.AuxInt = 2
+ v0.AuxInt = int64ToAuxInt(2)
v0.AddArg(idx1)
v.AddArg3(ptr1, v0, mem)
return true
// result: (MOVDstorezeroidx ptr idx mem)
for {
ptr := v_0
- if v_1.Op != OpARM64ADDconst || v_1.AuxInt != 4 {
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 4 {
break
}
idx := v_1.Args[0]
if x.Op != OpARM64SLLconst {
break
}
- c := x.AuxInt
+ c := auxIntToInt64(x.AuxInt)
y := x.Args[0]
if !(clobberIfDead(x)) {
break
}
v.reset(OpARM64MVNshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(y)
return true
}
if x.Op != OpARM64SRLconst {
break
}
- c := x.AuxInt
+ c := auxIntToInt64(x.AuxInt)
y := x.Args[0]
if !(clobberIfDead(x)) {
break
}
v.reset(OpARM64MVNshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(y)
return true
}
if x.Op != OpARM64SRAconst {
break
}
- c := x.AuxInt
+ c := auxIntToInt64(x.AuxInt)
y := x.Args[0]
if !(clobberIfDead(x)) {
break
}
v.reset(OpARM64MVNshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(y)
return true
}
// match: (MVNshiftLL (MOVDconst [c]) [d])
// result: (MOVDconst [^int64(uint64(c)<<uint64(d))])
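// Example: c = 1, d = 3 gives ^int64(uint64(1)<<3) = ^8 = -9, so
// (MVNshiftLL (MOVDconst [1]) [3]) folds to (MOVDconst [-9]).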
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = ^int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
return true
}
return false
// match: (MVNshiftRA (MOVDconst [c]) [d])
// result: (MOVDconst [^(c>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = ^(c >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
return true
}
return false
// match: (MVNshiftRL (MOVDconst [c]) [d])
// result: (MOVDconst [^int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = ^int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
return true
}
return false
if x.Op != OpARM64SLLconst {
break
}
- c := x.AuxInt
+ c := auxIntToInt64(x.AuxInt)
y := x.Args[0]
if !(clobberIfDead(x)) {
break
}
v.reset(OpARM64NEGshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(y)
return true
}
if x.Op != OpARM64SRLconst {
break
}
- c := x.AuxInt
+ c := auxIntToInt64(x.AuxInt)
y := x.Args[0]
if !(clobberIfDead(x)) {
break
}
v.reset(OpARM64NEGshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(y)
return true
}
if x.Op != OpARM64SRAconst {
break
}
- c := x.AuxInt
+ c := auxIntToInt64(x.AuxInt)
y := x.Args[0]
if !(clobberIfDead(x)) {
break
}
v.reset(OpARM64NEGshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(y)
return true
}
// match: (NEGshiftLL (MOVDconst [c]) [d])
// result: (MOVDconst [-int64(uint64(c)<<uint64(d))])
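// Example: c = 1, d = 3 gives -int64(uint64(1)<<3) = -8, so
// (NEGshiftLL (MOVDconst [1]) [3]) folds to (MOVDconst [-8]).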
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = -int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(-int64(uint64(c) << uint64(d)))
return true
}
return false
// match: (NEGshiftRA (MOVDconst [c]) [d])
// result: (MOVDconst [-(c>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = -(c >> uint64(d))
+ v.AuxInt = int64ToAuxInt(-(c >> uint64(d)))
return true
}
return false
// match: (NEGshiftRL (MOVDconst [c]) [d])
// result: (MOVDconst [-int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
v.reset(OpARM64MOVDconst)
- v.AuxInt = -int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(-int64(uint64(c) >> uint64(d)))
return true
}
return false
if x1.Op != OpARM64SLLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ORshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ORshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRAconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64ORshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 63 {
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 63 {
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SLL {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 31 {
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 31 {
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SLL {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
break
}
// match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y))
- // cond: ac == ^((1<<uint(getARM64BFwidth(bfc))-1) << uint(getARM64BFlsb(bfc)))
+ // cond: ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
// result: (BFI [bfc] y x)
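// Example: for a field with lsb = 8 and width = 8 the condition requires
// ac == ^0xff00, so the ANDconst clears exactly the byte that the UBFIZ
// fills with x's low bits; the OR is then a bitfield insert (BFI) into y.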
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpARM64UBFIZ {
continue
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpARM64ANDconst {
continue
}
- ac := v_1.AuxInt
+ ac := auxIntToInt64(v_1.AuxInt)
y := v_1.Args[0]
- if !(ac == ^((1<<uint(getARM64BFwidth(bfc)) - 1) << uint(getARM64BFlsb(bfc)))) {
+ if !(ac == ^((1<<uint(bfc.getARM64BFwidth()) - 1) << uint(bfc.getARM64BFlsb()))) {
continue
}
v.reset(OpARM64BFI)
- v.AuxInt = bfc
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg2(y, x)
return true
}
break
}
// match: (OR (UBFX [bfc] x) (ANDconst [ac] y))
- // cond: ac == ^(1<<uint(getARM64BFwidth(bfc))-1)
+ // cond: ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
// result: (BFXIL [bfc] y x)
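// Example: for width = 8 the condition requires ac == ^0xff; y keeps its
// upper bits while the UBFX supplies the low byte extracted from x, which
// is the BFXIL operation.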
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpARM64UBFX {
continue
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpARM64ANDconst {
continue
}
- ac := v_1.AuxInt
+ ac := auxIntToInt64(v_1.AuxInt)
y := v_1.Args[0]
- if !(ac == ^(1<<uint(getARM64BFwidth(bfc)) - 1)) {
+ if !(ac == ^(1<<uint(bfc.getARM64BFwidth()) - 1)) {
continue
}
v.reset(OpARM64BFXIL)
- v.AuxInt = bfc
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg2(y, x)
return true
}
}
// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)))
// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
- // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
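// Example (little-endian): the byte loaded at i0 ends up in bits 0-7, i1
// in bits 8-15, i2 in bits 16-23 and i3 (behind the SLLconst [24]) in bits
// 24-31, so the whole OR tree equals one unsigned 32-bit load at i0.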
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
s0 := o1.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
continue
}
y0 := s0.Args[0]
if x0.Op != OpARM64MOVBUload {
continue
}
- i3 := x0.AuxInt
- s := x0.Aux
+ i3 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o1.Args[1]
if x1.Op != OpARM64MOVBUload {
continue
}
- i2 := x1.AuxInt
- if x1.Aux != s {
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
if x2.Op != OpARM64MOVBUload {
continue
}
- i1 := x2.AuxInt
- if x2.Aux != s {
+ i1 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
if x3.Op != OpARM64MOVBUload {
continue
}
- i0 := x3.AuxInt
- if x3.Aux != s {
+ i0 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
b = mergePoint(b, x0, x1, x2, x3)
v0 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t)
v.copyOf(v0)
- v0.Aux = s
+ v0.Aux = symToAux(s)
v1 := b.NewValue0(x3.Pos, OpOffPtr, p.Type)
- v1.AuxInt = i0
+ v1.AuxInt = int64ToAuxInt(int64(i0))
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
s0 := o1.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
continue
}
y0 := s0.Args[0]
continue
}
x0 := y0.Args[0]
- if x0.Op != OpARM64MOVBUload || x0.AuxInt != 3 {
+ if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 3 {
continue
}
- s := x0.Aux
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o1.Args[1]
continue
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
continue
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 1 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
s0 := o1.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
continue
}
y0 := s0.Args[0]
mem := x0.Args[2]
ptr := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 3 {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 3 {
continue
}
idx := x0_1.Args[0]
continue
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
continue
}
y2 := o0.Args[1]
continue
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 1 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 1 || idx != x2_1.Args[0] || mem != x2.Args[2] {
continue
}
y3 := v_1
}
// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)))
// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
- // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
continue
}
_ = o2.Args[1]
o3 := o2.Args[0]
- if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
continue
}
_ = o3.Args[1]
o4 := o3.Args[0]
- if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
continue
}
_ = o4.Args[1]
o5 := o4.Args[0]
- if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
continue
}
_ = o5.Args[1]
s0 := o5.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
y0 := s0.Args[0]
if x0.Op != OpARM64MOVBUload {
continue
}
- i7 := x0.AuxInt
- s := x0.Aux
+ i7 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o5.Args[1]
if x1.Op != OpARM64MOVBUload {
continue
}
- i6 := x1.AuxInt
- if x1.Aux != s {
+ i6 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
if x2.Op != OpARM64MOVBUload {
continue
}
- i5 := x2.AuxInt
- if x2.Aux != s {
+ i5 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
if x3.Op != OpARM64MOVBUload {
continue
}
- i4 := x3.AuxInt
- if x3.Aux != s {
+ i4 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
if x4.Op != OpARM64MOVBUload {
continue
}
- i3 := x4.AuxInt
- if x4.Aux != s {
+ i3 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
if x5.Op != OpARM64MOVBUload {
continue
}
- i2 := x5.AuxInt
- if x5.Aux != s {
+ i2 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
continue
}
_ = x5.Args[1]
if x6.Op != OpARM64MOVBUload {
continue
}
- i1 := x6.AuxInt
- if x6.Aux != s {
+ i1 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
continue
}
_ = x6.Args[1]
if x7.Op != OpARM64MOVBUload {
continue
}
- i0 := x7.AuxInt
- if x7.Aux != s {
+ i0 := auxIntToInt32(x7.AuxInt)
+ if auxToSym(x7.Aux) != s {
continue
}
_ = x7.Args[1]
b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
v0 := b.NewValue0(x7.Pos, OpARM64MOVDload, t)
v.copyOf(v0)
- v0.Aux = s
+ v0.Aux = symToAux(s)
v1 := b.NewValue0(x7.Pos, OpOffPtr, p.Type)
- v1.AuxInt = i0
+ v1.AuxInt = int64ToAuxInt(int64(i0))
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
continue
}
_ = o2.Args[1]
o3 := o2.Args[0]
- if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
continue
}
_ = o3.Args[1]
o4 := o3.Args[0]
- if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
continue
}
_ = o4.Args[1]
o5 := o4.Args[0]
- if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
continue
}
_ = o5.Args[1]
s0 := o5.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
y0 := s0.Args[0]
continue
}
x0 := y0.Args[0]
- if x0.Op != OpARM64MOVBUload || x0.AuxInt != 7 {
+ if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 7 {
continue
}
- s := x0.Aux
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o5.Args[1]
continue
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 6 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 6 || auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
continue
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
continue
}
x3 := y3.Args[0]
- if x3.Op != OpARM64MOVBUload || x3.AuxInt != 4 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 4 || auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
continue
}
x4 := y4.Args[0]
- if x4.Op != OpARM64MOVBUload || x4.AuxInt != 3 || x4.Aux != s {
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 3 || auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
continue
}
x5 := y5.Args[0]
- if x5.Op != OpARM64MOVBUload || x5.AuxInt != 2 || x5.Aux != s {
+ if x5.Op != OpARM64MOVBUload || auxIntToInt32(x5.AuxInt) != 2 || auxToSym(x5.Aux) != s {
continue
}
_ = x5.Args[1]
continue
}
x6 := y6.Args[0]
- if x6.Op != OpARM64MOVBUload || x6.AuxInt != 1 || x6.Aux != s {
+ if x6.Op != OpARM64MOVBUload || auxIntToInt32(x6.AuxInt) != 1 || auxToSym(x6.Aux) != s {
continue
}
_ = x6.Args[1]
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
continue
}
_ = o2.Args[1]
o3 := o2.Args[0]
- if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
continue
}
_ = o3.Args[1]
o4 := o3.Args[0]
- if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
continue
}
_ = o4.Args[1]
o5 := o4.Args[0]
- if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
continue
}
_ = o5.Args[1]
s0 := o5.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
y0 := s0.Args[0]
mem := x0.Args[2]
ptr := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 7 {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 7 {
continue
}
idx := x0_1.Args[0]
continue
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 6 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 6 || idx != x1_1.Args[0] || mem != x1.Args[2] {
continue
}
y2 := o4.Args[1]
continue
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
continue
}
y3 := o3.Args[1]
continue
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 4 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 4 || idx != x3_1.Args[0] || mem != x3.Args[2] {
continue
}
y4 := o2.Args[1]
continue
}
x4_1 := x4.Args[1]
- if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 3 || idx != x4_1.Args[0] || mem != x4.Args[2] {
+ if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 3 || idx != x4_1.Args[0] || mem != x4.Args[2] {
continue
}
y5 := o1.Args[1]
continue
}
x5_1 := x5.Args[1]
- if x5_1.Op != OpARM64ADDconst || x5_1.AuxInt != 2 || idx != x5_1.Args[0] || mem != x5.Args[2] {
+ if x5_1.Op != OpARM64ADDconst || auxIntToInt64(x5_1.AuxInt) != 2 || idx != x5_1.Args[0] || mem != x5.Args[2] {
continue
}
y6 := o0.Args[1]
continue
}
x6_1 := x6.Args[1]
- if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 1 || idx != x6_1.Args[0] || mem != x6.Args[2] {
+ if x6_1.Op != OpARM64ADDconst || auxIntToInt64(x6_1.AuxInt) != 1 || idx != x6_1.Args[0] || mem != x6.Args[2] {
continue
}
y7 := v_1
}
// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
- // result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+ // result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
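// Example: unlike the rule above, here the lowest address i0 feeds the
// SLLconst [24] (bits 24-31) and i3 lands in bits 0-7, the big-endian
// order, so the combine is a 32-bit load followed by a byte reverse (REVW).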
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
s0 := o1.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
continue
}
y0 := s0.Args[0]
if x0.Op != OpARM64MOVBUload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o1.Args[1]
if x1.Op != OpARM64MOVBUload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
if x2.Op != OpARM64MOVBUload {
continue
}
- i2 := x2.AuxInt
- if x2.Aux != s {
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
if x3.Op != OpARM64MOVBUload {
continue
}
- i3 := x3.AuxInt
- if x3.Aux != s {
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
v0 := b.NewValue0(x3.Pos, OpARM64REVW, t)
v.copyOf(v0)
v1 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t)
- v1.Aux = s
+ v1.Aux = symToAux(s)
v2 := b.NewValue0(x3.Pos, OpOffPtr, p.Type)
- v2.AuxInt = i0
+ v2.AuxInt = int64ToAuxInt(int64(i0))
v2.AddArg(p)
v1.AddArg2(v2, mem)
v0.AddArg(v1)
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
s0 := o1.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
continue
}
y0 := s0.Args[0]
continue
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
continue
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
if p1.Op != OpARM64ADD {
continue
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
continue
}
x3 := y3.Args[0]
- if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
s0 := o1.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 24 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
continue
}
y0 := s0.Args[0]
continue
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
continue
}
y2 := o0.Args[1]
continue
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
continue
}
y3 := v_1
continue
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
continue
}
b = mergePoint(b, x0, x1, x2, x3)
}
// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)))
// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
- // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
for {
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
continue
}
_ = o2.Args[1]
o3 := o2.Args[0]
- if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
continue
}
_ = o3.Args[1]
o4 := o3.Args[0]
- if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
continue
}
_ = o4.Args[1]
o5 := o4.Args[0]
- if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
continue
}
_ = o5.Args[1]
s0 := o5.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
y0 := s0.Args[0]
if x0.Op != OpARM64MOVBUload {
continue
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o5.Args[1]
if x1.Op != OpARM64MOVBUload {
continue
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
continue
}
_ = x1.Args[1]
if x2.Op != OpARM64MOVBUload {
continue
}
- i2 := x2.AuxInt
- if x2.Aux != s {
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
if x3.Op != OpARM64MOVBUload {
continue
}
- i3 := x3.AuxInt
- if x3.Aux != s {
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
if x4.Op != OpARM64MOVBUload {
continue
}
- i4 := x4.AuxInt
- if x4.Aux != s {
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
if x5.Op != OpARM64MOVBUload {
continue
}
- i5 := x5.AuxInt
- if x5.Aux != s {
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
continue
}
_ = x5.Args[1]
if x6.Op != OpARM64MOVBUload {
continue
}
- i6 := x6.AuxInt
- if x6.Aux != s {
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
continue
}
_ = x6.Args[1]
if x7.Op != OpARM64MOVBUload {
continue
}
- i7 := x7.AuxInt
- if x7.Aux != s {
+ i7 := auxIntToInt32(x7.AuxInt)
+ if auxToSym(x7.Aux) != s {
continue
}
_ = x7.Args[1]
v0 := b.NewValue0(x7.Pos, OpARM64REV, t)
v.copyOf(v0)
v1 := b.NewValue0(x7.Pos, OpARM64MOVDload, t)
- v1.Aux = s
+ v1.Aux = symToAux(s)
v2 := b.NewValue0(x7.Pos, OpOffPtr, p.Type)
- v2.AuxInt = i0
+ v2.AuxInt = int64ToAuxInt(int64(i0))
v2.AddArg(p)
v1.AddArg2(v2, mem)
v0.AddArg(v1)
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
continue
}
_ = o2.Args[1]
o3 := o2.Args[0]
- if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
continue
}
_ = o3.Args[1]
o4 := o3.Args[0]
- if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
continue
}
_ = o4.Args[1]
o5 := o4.Args[0]
- if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
continue
}
_ = o5.Args[1]
s0 := o5.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
y0 := s0.Args[0]
continue
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
continue
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
if p1.Op != OpARM64ADD {
continue
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
continue
}
x3 := y3.Args[0]
- if x3.Op != OpARM64MOVBUload || x3.AuxInt != 3 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
continue
}
x4 := y4.Args[0]
- if x4.Op != OpARM64MOVBUload || x4.AuxInt != 4 || x4.Aux != s {
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 4 || auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
continue
}
x5 := y5.Args[0]
- if x5.Op != OpARM64MOVBUload || x5.AuxInt != 5 || x5.Aux != s {
+ if x5.Op != OpARM64MOVBUload || auxIntToInt32(x5.AuxInt) != 5 || auxToSym(x5.Aux) != s {
continue
}
_ = x5.Args[1]
continue
}
x6 := y6.Args[0]
- if x6.Op != OpARM64MOVBUload || x6.AuxInt != 6 || x6.Aux != s {
+ if x6.Op != OpARM64MOVBUload || auxIntToInt32(x6.AuxInt) != 6 || auxToSym(x6.Aux) != s {
continue
}
_ = x6.Args[1]
continue
}
x7 := y7.Args[0]
- if x7.Op != OpARM64MOVBUload || x7.AuxInt != 7 || x7.Aux != s {
+ if x7.Op != OpARM64MOVBUload || auxIntToInt32(x7.AuxInt) != 7 || auxToSym(x7.Aux) != s {
continue
}
_ = x7.Args[1]
t := v.Type
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 8 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
continue
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 16 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
continue
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 24 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
continue
}
_ = o2.Args[1]
o3 := o2.Args[0]
- if o3.Op != OpARM64ORshiftLL || o3.AuxInt != 32 {
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
continue
}
_ = o3.Args[1]
o4 := o3.Args[0]
- if o4.Op != OpARM64ORshiftLL || o4.AuxInt != 40 {
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
continue
}
_ = o4.Args[1]
o5 := o4.Args[0]
- if o5.Op != OpARM64ORshiftLL || o5.AuxInt != 48 {
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
continue
}
_ = o5.Args[1]
s0 := o5.Args[0]
- if s0.Op != OpARM64SLLconst || s0.AuxInt != 56 {
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
continue
}
y0 := s0.Args[0]
continue
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
continue
}
y2 := o4.Args[1]
continue
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
continue
}
y3 := o3.Args[1]
continue
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] {
continue
}
y4 := o2.Args[1]
continue
}
x4_1 := x4.Args[1]
- if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 4 || idx != x4_1.Args[0] || mem != x4.Args[2] {
+ if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 4 || idx != x4_1.Args[0] || mem != x4.Args[2] {
continue
}
y5 := o1.Args[1]
continue
}
x5_1 := x5.Args[1]
- if x5_1.Op != OpARM64ADDconst || x5_1.AuxInt != 5 || idx != x5_1.Args[0] || mem != x5.Args[2] {
+ if x5_1.Op != OpARM64ADDconst || auxIntToInt64(x5_1.AuxInt) != 5 || idx != x5_1.Args[0] || mem != x5.Args[2] {
continue
}
y6 := o0.Args[1]
continue
}
x6_1 := x6.Args[1]
- if x6_1.Op != OpARM64ADDconst || x6_1.AuxInt != 6 || idx != x6_1.Args[0] || mem != x6.Args[2] {
+ if x6_1.Op != OpARM64ADDconst || auxIntToInt64(x6_1.AuxInt) != 6 || idx != x6_1.Args[0] || mem != x6.Args[2] {
continue
}
y7 := v_1
continue
}
x7_1 := x7.Args[1]
- if x7_1.Op != OpARM64ADDconst || x7_1.AuxInt != 7 || idx != x7_1.Args[0] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ if x7_1.Op != OpARM64ADDconst || auxIntToInt64(x7_1.AuxInt) != 7 || idx != x7_1.Args[0] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
continue
}
b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
if x1.Op != OpARM64SLLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64ORNshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64ORNshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRAconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64ORNshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
// match: (ORNshiftLL x (MOVDconst [c]) [d])
// result: (ORconst x [^int64(uint64(c)<<uint64(d))])
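// Example: c = 1, d = 4 gives ^int64(uint64(1)<<4) = ^16 = -17, so
// (ORNshiftLL x (MOVDconst [1]) [4]) folds to (ORconst x [-17]).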
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ORconst)
- v.AuxInt = ^int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [-1])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SLLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
// match: (ORNshiftRA x (MOVDconst [c]) [d])
// result: (ORconst x [^(c>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ORconst)
- v.AuxInt = ^(c >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [-1])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRAconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
// match: (ORNshiftRL x (MOVDconst [c]) [d])
// result: (ORconst x [^int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ORconst)
- v.AuxInt = ^int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [-1])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = -1
+ v.AuxInt = int64ToAuxInt(-1)
return true
}
return false
// cond: c2|c1 == ^0
// result: (ORconst [c1] x)
for {
- c1 := v.AuxInt
+ c1 := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
- c2 := v_0.AuxInt
+ c2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(c2|c1 == ^0) {
break
}
v.reset(OpARM64ORconst)
- v.AuxInt = c1
+ v.AuxInt = int64ToAuxInt(c1)
v.AddArg(x)
return true
}
// match: (ORshiftLL (MOVDconst [c]) x [d])
// result: (ORconst [c] (SLLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (ORshiftLL x (MOVDconst [c]) [d])
// result: (ORconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ORconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: y
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
if y.Op != OpARM64SLLconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if x != y.Args[0] || !(c == d) {
break
}
// match: ( ORshiftLL [c] (SRLconst x [64-c]) x)
// result: (RORconst [64-c] x)
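// Example: for c = 8 the value is (x>>56) | (x<<8), i.e. x rotated left by
// 8, which ARM64 writes as a rotate right by 56: (RORconst [56] x).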
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
break
}
v.reset(OpARM64RORconst)
- v.AuxInt = 64 - c
+ v.AuxInt = int64ToAuxInt(64 - c)
v.AddArg(x)
return true
}
// result: (RORWconst [32-c] x)
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64RORWconst)
- v.AuxInt = 32 - c
+ v.AuxInt = int64ToAuxInt(32 - c)
v.AddArg(x)
return true
}
// match: (ORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
// result: (REV16W x)
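// Example: on the low 16 bits this computes ((x>>8)&0xff) | (x<<8), i.e.
// it swaps the two bytes of a halfword, which is exactly REV16W.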
for {
- if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
break
}
x := v_0.Args[0]
// match: ( ORshiftLL [c] (SRLconst x [64-c]) x2)
// result: (EXTRconst [64-c] x2 x)
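// Example: for c = 8 the value is (x>>56) | (x2<<8), the low 64 bits of
// the 128-bit pair x2:x shifted right by 56, i.e. (EXTRconst [56] x2 x).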
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
x2 := v_1
v.reset(OpARM64EXTRconst)
- v.AuxInt = 64 - c
+ v.AuxInt = int64ToAuxInt(64 - c)
v.AddArg2(x2, x)
return true
}
// result: (EXTRWconst [32-c] x2 x)
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
x2 := v_1
if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64EXTRWconst)
- v.AuxInt = 32 - c
+ v.AuxInt = int64ToAuxInt(32 - c)
v.AddArg2(x2, x)
return true
}
// match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y))
- // cond: sc == getARM64BFwidth(bfc)
+ // cond: sc == bfc.getARM64BFwidth()
// result: (BFXIL [bfc] y x)
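// Example: (SRLconst [sc] y) shifted back left by sc is y with its low sc
// bits cleared, and the UBFX drops the field extracted from x into that
// gap; with sc equal to the field width this is exactly BFXIL.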
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if v_1.Op != OpARM64SRLconst || v_1.AuxInt != sc {
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != sc {
break
}
y := v_1.Args[0]
- if !(sc == getARM64BFwidth(bfc)) {
+ if !(sc == bfc.getARM64BFwidth()) {
break
}
v.reset(OpARM64BFXIL)
- v.AuxInt = bfc
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
v.AddArg2(y, x)
return true
}
// match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, y0, y1)
- // result: @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ // result: @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
for {
t := v.Type
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
y0 := v_0
if x0.Op != OpARM64MOVBUload {
break
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := v_1
if x1.Op != OpARM64MOVBUload {
break
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t)
v.copyOf(v0)
- v0.Aux = s
+ v0.Aux = symToAux(s)
v1 := b.NewValue0(x1.Pos, OpOffPtr, p.Type)
- v1.AuxInt = i0
+ v1.AuxInt = int64ToAuxInt(int64(i0))
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
// result: @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr0 idx0 mem)
for {
t := v.Type
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
y0 := v_0
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
break
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
if p1.Op != OpARM64ADD {
// result: @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr idx mem)
for {
t := v.Type
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
y0 := v_0
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
break
}
b = mergePoint(b, x0, x1)
}
// match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] x0:(MOVHUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem)))
// cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y1, y2, o0)
- // result: @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
if x0.Op != OpARM64MOVHUload {
break
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o0.Args[1]
if x1.Op != OpARM64MOVBUload {
break
}
- i2 := x1.AuxInt
- if x1.Aux != s {
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
if x2.Op != OpARM64MOVBUload {
break
}
- i3 := x2.AuxInt
- if x2.Aux != s {
+ i3 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t)
v.copyOf(v0)
- v0.Aux = s
+ v0.Aux = symToAux(s)
v1 := b.NewValue0(x2.Pos, OpOffPtr, p.Type)
- v1.AuxInt = i0
+ v1.AuxInt = int64ToAuxInt(int64(i0))
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
// result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 idx0 mem)
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 {
break
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
if p1.Op != OpARM64ADD {
continue
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 3 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 3 || auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
// result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr idx mem)
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
break
}
y2 := v_1
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 3 || idx != x2_1.Args[0] || mem != x2.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y1, y2, o0)) {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 3 || idx != x2_1.Args[0] || mem != x2.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y1, y2, o0)) {
break
}
b = mergePoint(b, x0, x1, x2)
// result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 (SLLconst <idx0.Type> [1] idx0) mem)
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 2 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 {
break
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
- if p1.Op != OpARM64ADDshiftLL || p1.AuxInt != 1 {
+ if p1.Op != OpARM64ADDshiftLL || auxIntToInt64(p1.AuxInt) != 1 {
break
}
idx1 := p1.Args[1]
break
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 3 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 3 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t)
v.copyOf(v0)
v1 := b.NewValue0(x2.Pos, OpARM64SLLconst, idx0.Type)
- v1.AuxInt = 1
+ v1.AuxInt = int64ToAuxInt(1)
v1.AddArg(idx0)
v0.AddArg3(ptr0, v1, mem)
return true
}
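
All of the ORshiftLL rules in this stretch (and the MOVDload rules that follow) serve one purpose: fusing a chain of adjacent unsigned byte loads into a single MOVHUload/MOVWUload/MOVDload, with idx variants for computed addresses. In source they typically fire on hand-written little-endian decoding; a sketch of the shape they target:

// Manual little-endian decode; the byte loads and ORs are what the
// MOVWUload fusion rules above collapse into one 32-bit load on arm64.
func leUint32(b []byte) uint32 {
	_ = b[3] // single bounds check
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}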
// match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem)))
// cond: i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
- // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
if x0.Op != OpARM64MOVWUload {
break
}
- i0 := x0.AuxInt
- s := x0.Aux
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o2.Args[1]
if x1.Op != OpARM64MOVBUload {
break
}
- i4 := x1.AuxInt
- if x1.Aux != s {
+ i4 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
if x2.Op != OpARM64MOVBUload {
break
}
- i5 := x2.AuxInt
- if x2.Aux != s {
+ i5 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
if x3.Op != OpARM64MOVBUload {
break
}
- i6 := x3.AuxInt
- if x3.Aux != s {
+ i6 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[1]
if x4.Op != OpARM64MOVBUload {
break
}
- i7 := x4.AuxInt
- if x4.Aux != s {
+ i7 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[1]
b = mergePoint(b, x0, x1, x2, x3, x4)
v0 := b.NewValue0(x4.Pos, OpARM64MOVDload, t)
v.copyOf(v0)
- v0.Aux = s
+ v0.Aux = symToAux(s)
v1 := b.NewValue0(x4.Pos, OpOffPtr, p.Type)
- v1.AuxInt = i0
+ v1.AuxInt = int64ToAuxInt(int64(i0))
v1.AddArg(p)
v0.AddArg2(v1, mem)
return true
// result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 idx0 mem)
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 4 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 4 {
break
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
if p1.Op != OpARM64ADD {
continue
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
continue
}
_ = x2.Args[1]
continue
}
x3 := y3.Args[0]
- if x3.Op != OpARM64MOVBUload || x3.AuxInt != 6 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 6 || auxToSym(x3.Aux) != s {
continue
}
_ = x3.Args[1]
continue
}
x4 := y4.Args[0]
- if x4.Op != OpARM64MOVBUload || x4.AuxInt != 7 || x4.Aux != s {
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 7 || auxToSym(x4.Aux) != s {
continue
}
_ = x4.Args[1]
// result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 (SLLconst <idx0.Type> [2] idx0) mem)
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 4 {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 4 {
break
}
- s := x1.Aux
+ s := auxToSym(x1.Aux)
_ = x1.Args[1]
p1 := x1.Args[0]
- if p1.Op != OpARM64ADDshiftLL || p1.AuxInt != 2 {
+ if p1.Op != OpARM64ADDshiftLL || auxIntToInt64(p1.AuxInt) != 2 {
break
}
idx1 := p1.Args[1]
break
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 5 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
break
}
x3 := y3.Args[0]
- if x3.Op != OpARM64MOVBUload || x3.AuxInt != 6 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 6 || auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[1]
break
}
x4 := y4.Args[0]
- if x4.Op != OpARM64MOVBUload || x4.AuxInt != 7 || x4.Aux != s {
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 7 || auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[1]
v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t)
v.copyOf(v0)
v1 := b.NewValue0(x4.Pos, OpARM64SLLconst, idx0.Type)
- v1.AuxInt = 2
+ v1.AuxInt = int64ToAuxInt(2)
v1.AddArg(idx0)
v0.AddArg3(ptr0, v1, mem)
return true
// result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr idx mem)
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 4 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 4 || idx != x1_1.Args[0] || mem != x1.Args[2] {
break
}
y2 := o1.Args[1]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
break
}
y3 := o0.Args[1]
break
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 6 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 6 || idx != x3_1.Args[0] || mem != x3.Args[2] {
break
}
y4 := v_1
break
}
x4_1 := x4.Args[1]
- if x4_1.Op != OpARM64ADDconst || x4_1.AuxInt != 7 || idx != x4_1.Args[0] || mem != x4.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
+ if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 7 || idx != x4_1.Args[0] || mem != x4.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
break
}
b = mergePoint(b, x0, x1, x2, x3, x4)
// result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
for {
t := v.Type
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
y0 := v_0
if x0.Op != OpARM64MOVBUload {
break
}
- i1 := x0.AuxInt
- s := x0.Aux
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := v_1
if x1.Op != OpARM64MOVBUload {
break
}
- i0 := x1.AuxInt
- if x1.Aux != s {
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
v0 := b.NewValue0(x1.Pos, OpARM64REV16W, t)
v.copyOf(v0)
v1 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t)
- v1.AuxInt = i0
- v1.Aux = s
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
v1.AddArg2(p, mem)
v0.AddArg(v1)
return true
// result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr0 idx0 mem))
for {
t := v.Type
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
y0 := v_0
break
}
x0 := y0.Args[0]
- if x0.Op != OpARM64MOVBUload || x0.AuxInt != 1 {
+ if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 1 {
break
}
- s := x0.Aux
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p1 := x0.Args[0]
if p1.Op != OpARM64ADD {
// result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr idx mem))
for {
t := v.Type
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
y0 := v_0
mem := x0.Args[2]
ptr := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 1 {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 1 {
break
}
idx := x0_1.Args[0]
}
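
The REV16W variants handle the byte-reversed (big-endian) order: the byte loads are still fused into one wide load, and a byte-reverse instruction is appended. REVW and REV, below, are the 32- and 64-bit analogues. A sketch of the source shape:

// Big-endian decode of two bytes; fused into MOVHUload + REV16W by the
// rules above, under the usual single-use conditions.
func beUint16(b []byte) uint16 {
	_ = b[1]
	return uint16(b[1]) | uint16(b[0])<<8
}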
// match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [i2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem)))
// cond: i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y0, y1, y2, o0)
- // result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+ // result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
if x0.Op != OpARM64MOVHUload {
break
}
- i2 := x0.AuxInt
- s := x0.Aux
+ i2 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o0.Args[1]
if x1.Op != OpARM64MOVBUload {
break
}
- i1 := x1.AuxInt
- if x1.Aux != s {
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
if x2.Op != OpARM64MOVBUload {
break
}
- i0 := x2.AuxInt
- if x2.Aux != s {
+ i0 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
v0 := b.NewValue0(x2.Pos, OpARM64REVW, t)
v.copyOf(v0)
v1 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t)
- v1.Aux = s
+ v1.Aux = symToAux(s)
v2 := b.NewValue0(x2.Pos, OpOffPtr, p.Type)
- v2.AuxInt = i0
+ v2.AuxInt = int64ToAuxInt(int64(i0))
v2.AddArg(p)
v1.AddArg2(v2, mem)
v0.AddArg(v1)
// result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
break
}
x0 := y0.Args[0]
- if x0.Op != OpARM64MOVHUload || x0.AuxInt != 2 {
+ if x0.Op != OpARM64MOVHUload || auxIntToInt32(x0.AuxInt) != 2 {
break
}
- s := x0.Aux
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o0.Args[1]
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 1 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
// result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
for {
t := v.Type
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 16 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
break
}
_ = o0.Args[1]
mem := x0.Args[2]
ptr := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 2 {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 2 {
break
}
idx := x0_1.Args[0]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
break
}
y2 := v_1
}
// match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [i4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem)))
// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
- // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem))
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
if x0.Op != OpARM64MOVWUload {
break
}
- i4 := x0.AuxInt
- s := x0.Aux
+ i4 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o2.Args[1]
if x1.Op != OpARM64MOVBUload {
break
}
- i3 := x1.AuxInt
- if x1.Aux != s {
+ i3 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
if x2.Op != OpARM64MOVBUload {
break
}
- i2 := x2.AuxInt
- if x2.Aux != s {
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
if x3.Op != OpARM64MOVBUload {
break
}
- i1 := x3.AuxInt
- if x3.Aux != s {
+ i1 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[1]
if x4.Op != OpARM64MOVBUload {
break
}
- i0 := x4.AuxInt
- if x4.Aux != s {
+ i0 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
break
}
_ = x4.Args[1]
v0 := b.NewValue0(x4.Pos, OpARM64REV, t)
v.copyOf(v0)
v1 := b.NewValue0(x4.Pos, OpARM64MOVDload, t)
- v1.Aux = s
+ v1.Aux = symToAux(s)
v2 := b.NewValue0(x4.Pos, OpOffPtr, p.Type)
- v2.AuxInt = i0
+ v2.AuxInt = int64ToAuxInt(int64(i0))
v2.AddArg(p)
v1.AddArg2(v2, mem)
v0.AddArg(v1)
// result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
break
}
x0 := y0.Args[0]
- if x0.Op != OpARM64MOVWUload || x0.AuxInt != 4 {
+ if x0.Op != OpARM64MOVWUload || auxIntToInt32(x0.AuxInt) != 4 {
break
}
- s := x0.Aux
+ s := auxToSym(x0.Aux)
mem := x0.Args[1]
p := x0.Args[0]
y1 := o2.Args[1]
break
}
x1 := y1.Args[0]
- if x1.Op != OpARM64MOVBUload || x1.AuxInt != 3 || x1.Aux != s {
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 3 || auxToSym(x1.Aux) != s {
break
}
_ = x1.Args[1]
break
}
x2 := y2.Args[0]
- if x2.Op != OpARM64MOVBUload || x2.AuxInt != 2 || x2.Aux != s {
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
break
}
_ = x2.Args[1]
break
}
x3 := y3.Args[0]
- if x3.Op != OpARM64MOVBUload || x3.AuxInt != 1 || x3.Aux != s {
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 1 || auxToSym(x3.Aux) != s {
break
}
_ = x3.Args[1]
// result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr idx mem))
for {
t := v.Type
- if v.AuxInt != 56 {
+ if auxIntToInt64(v.AuxInt) != 56 {
break
}
o0 := v_0
- if o0.Op != OpARM64ORshiftLL || o0.AuxInt != 48 {
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
break
}
_ = o0.Args[1]
o1 := o0.Args[0]
- if o1.Op != OpARM64ORshiftLL || o1.AuxInt != 40 {
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
break
}
_ = o1.Args[1]
o2 := o1.Args[0]
- if o2.Op != OpARM64ORshiftLL || o2.AuxInt != 32 {
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
break
}
_ = o2.Args[1]
mem := x0.Args[2]
ptr := x0.Args[0]
x0_1 := x0.Args[1]
- if x0_1.Op != OpARM64ADDconst || x0_1.AuxInt != 4 {
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 4 {
break
}
idx := x0_1.Args[0]
break
}
x1_1 := x1.Args[1]
- if x1_1.Op != OpARM64ADDconst || x1_1.AuxInt != 3 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 3 || idx != x1_1.Args[0] || mem != x1.Args[2] {
break
}
y2 := o1.Args[1]
break
}
x2_1 := x2.Args[1]
- if x2_1.Op != OpARM64ADDconst || x2_1.AuxInt != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
break
}
y3 := o0.Args[1]
break
}
x3_1 := x3.Args[1]
- if x3_1.Op != OpARM64ADDconst || x3_1.AuxInt != 1 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 1 || idx != x3_1.Args[0] || mem != x3.Args[2] {
break
}
y4 := v_1
// match: (ORshiftRA (MOVDconst [c]) x [d])
// result: (ORconst [c] (SRAconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (ORshiftRA x (MOVDconst [c]) [d])
// result: (ORconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ORconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
// cond: c==d
// result: y
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
if y.Op != OpARM64SRAconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if x != y.Args[0] || !(c == d) {
break
}
// match: (ORshiftRL (MOVDconst [c]) x [d])
// result: (ORconst [c] (SRLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64ORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (ORshiftRL x (MOVDconst [c]) [d])
// result: (ORconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64ORconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: y
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
y := v_1
if y.Op != OpARM64SRLconst {
break
}
- c := y.AuxInt
+ c := auxIntToInt64(y.AuxInt)
if x != y.Args[0] || !(c == d) {
break
}
// match: (ORshiftRL [c] (SLLconst x [64-c]) x)
// result: (RORconst [c] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
break
}
v.reset(OpARM64RORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
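
This rule is the classic rotate idiom: for 0 < c < 64, the OR of x shifted right by c with x shifted left by 64-c moves every bit by c positions with wraparound, which is exactly a right rotate. A sketch of the pattern:

// Hand-written constant rotate; recognized by the rule above and emitted
// as a single RORconst. math/bits.RotateLeft64(x, -17) should lower to
// the same instruction.
func ror17(x uint64) uint64 {
	return x>>17 | x<<(64-17)
}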
// result: (RORWconst [c] x)
for {
t := v.Type
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 32-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
break
}
x := v_0.Args[0]
break
}
v.reset(OpARM64RORWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// cond: lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
// result: (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
- ac := v_0.AuxInt
+ ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if v_1.Op != OpARM64SLLconst {
break
}
- lc := v_1.AuxInt
+ lc := auxIntToInt64(v_1.AuxInt)
y := v_1.Args[0]
if !(lc > rc && ac == ^((1<<uint(64-lc)-1)<<uint64(lc-rc))) {
break
}
v.reset(OpARM64BFI)
- v.AuxInt = armBFAuxInt(lc-rc, 64-lc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
v.AddArg2(x, y)
return true
}
// cond: lc < rc && ac == ^((1<<uint(64-rc)-1))
// result: (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
- ac := v_0.AuxInt
+ ac := auxIntToInt64(v_0.AuxInt)
y := v_0.Args[0]
if v_1.Op != OpARM64SLLconst {
break
}
- lc := v_1.AuxInt
+ lc := auxIntToInt64(v_1.AuxInt)
x := v_1.Args[0]
if !(lc < rc && ac == ^(1<<uint(64-rc)-1)) {
break
}
v.reset(OpARM64BFXIL)
- v.AuxInt = armBFAuxInt(rc-lc, 64-rc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
v.AddArg2(y, x)
return true
}
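
BFI and BFXIL carry a packed (lsb, width) pair in their auxInt. Before this change the pair travelled as a bare int64 with free-function accessors; now it is a distinct arm64BitField type with methods, converted at the boundary by auxIntToArm64BitField/arm64BitFieldToAuxInt. Roughly, as in the support code (the exact packing is an implementation detail there):

type arm64BitField int16

// armBFAuxInt packs a (lsb, width) pair for the arm64 bitfield ops.
func armBFAuxInt(lsb, width int64) arm64BitField {
	if lsb < 0 || lsb > 63 {
		panic("ARM(64) bit field lsb constant out of range")
	}
	if width < 1 || width > 64 {
		panic("ARM(64) bit field width constant out of range")
	}
	return arm64BitField(width | lsb<<8)
}

// getARM64BFlsb returns the lsb part of the packed bitfield.
func (bfc arm64BitField) getARM64BFlsb() int64 {
	return int64(uint64(bfc) >> 8)
}

// getARM64BFwidth returns the width part of the packed bitfield.
func (bfc arm64BitField) getARM64BFwidth() int64 {
	return int64(bfc) & 0xff
}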
// match: (RORWconst [c] (RORWconst [d] x))
// result: (RORWconst [(c+d)&31] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64RORWconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpARM64RORWconst)
- v.AuxInt = (c + d) & 31
+ v.AuxInt = int64ToAuxInt((c + d) & 31)
v.AddArg(x)
return true
}
// match: (RORconst [c] (RORconst [d] x))
// result: (RORconst [(c+d)&63] x)
for {
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64RORconst {
break
}
- d := v_0.AuxInt
+ d := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
v.reset(OpARM64RORconst)
- v.AuxInt = (c + d) & 63
+ v.AuxInt = int64ToAuxInt((c + d) & 63)
v.AddArg(x)
return true
}
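
Both rules rest on the fact that rotations compose additively modulo the word size; the &31 and &63 masks are that reduction. A runnable check of the identity:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint64(0x0123456789abcdef)
	// Rotating right by 13 and then by 7 equals one rotate by (13+7)&63.
	a := bits.RotateLeft64(bits.RotateLeft64(x, -13), -7)
	b := bits.RotateLeft64(x, -((13+7)&63))
	fmt.Println(a == b) // true
}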
// cond: 0 < c && c < 64
// result: (ANDconst [^(1<<uint(c)-1)] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
x := v_0.Args[0]
break
}
v.reset(OpARM64ANDconst)
- v.AuxInt = ^(1<<uint(c) - 1)
+ v.AuxInt = int64ToAuxInt(^(1<<uint(c) - 1))
v.AddArg(x)
return true
}
// cond: isARM64BFMask(sc, ac, 0)
// result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
- ac := v_0.AuxInt
+ ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, ac, 0)) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, 0))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
v.AddArg(x)
return true
}
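
UBFIZ (unsigned bitfield insert in zeros) is what mask-then-shift compiles to when the mask is contiguous from bit 0, which is what isARM64BFMask checks. A sketch of the source form:

// Mask of width 10, then shift left by 4: one UBFIZ with lsb=4,
// width=10, assuming the SLLconst-of-ANDconst rule above fires.
func ubfiz(x uint64) uint64 {
	return (x & 0x3ff) << 4
}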
// cond: isARM64BFMask(sc, 1<<32-1, 0)
// result: (UBFIZ [armBFAuxInt(sc, 32)] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVWUreg {
break
}
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, 32)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 32))
v.AddArg(x)
return true
}
// cond: isARM64BFMask(sc, 1<<16-1, 0)
// result: (UBFIZ [armBFAuxInt(sc, 16)] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVHUreg {
break
}
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, 16)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 16))
v.AddArg(x)
return true
}
// cond: isARM64BFMask(sc, 1<<8-1, 0)
// result: (UBFIZ [armBFAuxInt(sc, 8)] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVBUreg {
break
}
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc, 8)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, 8))
v.AddArg(x)
return true
}
// match: (SLLconst [sc] (UBFIZ [bfc] x))
- // cond: sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
- // result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
+ // cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFIZ {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64) {
+ if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
v.AddArg(x)
return true
}
// cond: lc > rc
// result: (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc > rc) {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = armBFAuxInt(lc-rc, 64-lc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
v.AddArg(x)
return true
}
// cond: lc <= rc
// result: (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc <= rc) {
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = armBFAuxInt(rc-lc, 64-rc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
v.AddArg(x)
return true
}
// cond: rc < 32
// result: (SBFX [armBFAuxInt(rc, 32-rc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVWreg {
break
}
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = armBFAuxInt(rc, 32-rc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
v.AddArg(x)
return true
}
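
The signed variants mirror the unsigned ones, with SBFX/SBFIZ preserving the sign bit of the field. For instance, an arithmetic shift of a sign-extended narrow value is a signed field extract:

// Sign-extend a 32-bit value, then shift right arithmetically by 5:
// one SBFX with lsb=5, width=27, assuming the MOVWreg rule above fires.
func sbfx(x int64) int64 {
	return int64(int32(x)) >> 5
}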
// cond: rc < 16
// result: (SBFX [armBFAuxInt(rc, 16-rc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVHreg {
break
}
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = armBFAuxInt(rc, 16-rc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
v.AddArg(x)
return true
}
// cond: rc < 8
// result: (SBFX [armBFAuxInt(rc, 8-rc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVBreg {
break
}
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = armBFAuxInt(rc, 8-rc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
v.AddArg(x)
return true
}
// match: (SRAconst [sc] (SBFIZ [bfc] x))
- // cond: sc < getARM64BFlsb(bfc)
- // result: (SBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SBFIZ {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < getARM64BFlsb(bfc)) {
+ if !(sc < bfc.getARM64BFlsb()) {
break
}
v.reset(OpARM64SBFIZ)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
v.AddArg(x)
return true
}
// match: (SRAconst [sc] (SBFIZ [bfc] x))
- // cond: sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- // result: (SBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+ // cond: sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SBFIZ {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) {
+ if !(sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
break
}
v.reset(OpARM64SBFX)
- v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
v.AddArg(x)
return true
}
// cond: 0 < c && c < 64
// result: (ANDconst [1<<uint(64-c)-1] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
break
}
x := v_0.Args[0]
break
}
v.reset(OpARM64ANDconst)
- v.AuxInt = 1<<uint(64-c) - 1
+ v.AuxInt = int64ToAuxInt(1<<uint(64-c) - 1)
v.AddArg(x)
return true
}
// cond: lc > rc
// result: (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc > rc) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(lc-rc, 64-lc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
v.AddArg(x)
return true
}
// cond: isARM64BFMask(sc, ac, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64ANDconst {
break
}
- ac := v_0.AuxInt
+ ac := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(isARM64BFMask(sc, ac, sc)) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(ac, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
v.AddArg(x)
return true
}
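
UBFX is the dual of UBFIZ: a right shift whose operand was masked to a contiguous field becomes a single unsigned bitfield extract. Sketch:

// Extract bits 16..23: one UBFX with lsb=16, width=8, assuming the
// SRLconst-of-ANDconst rule above fires.
func ubfx(x uint64) uint64 {
	return (x & 0xff0000) >> 16
}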
// cond: isARM64BFMask(sc, 1<<32-1, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVWUreg {
break
}
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<32-1, sc)))
v.AddArg(x)
return true
}
// cond: isARM64BFMask(sc, 1<<16-1, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVHUreg {
break
}
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<16-1, sc)))
v.AddArg(x)
return true
}
// cond: isARM64BFMask(sc, 1<<8-1, sc)
// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVBUreg {
break
}
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(1<<8-1, sc)))
v.AddArg(x)
return true
}
// cond: lc < rc
// result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
for {
- rc := v.AuxInt
+ rc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- lc := v_0.AuxInt
+ lc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
if !(lc < rc) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(rc-lc, 64-rc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFX [bfc] x))
- // cond: sc < getARM64BFwidth(bfc)
- // result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+ // cond: sc < bfc.getARM64BFwidth()
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < getARM64BFwidth(bfc)) {
+ if !(sc < bfc.getARM64BFwidth()) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFIZ [bfc] x))
- // cond: sc == getARM64BFlsb(bfc)
- // result: (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x)
+ // cond: sc == bfc.getARM64BFlsb()
+ // result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFIZ {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc == getARM64BFlsb(bfc)) {
+ if !(sc == bfc.getARM64BFlsb()) {
break
}
v.reset(OpARM64ANDconst)
- v.AuxInt = 1<<uint(getARM64BFwidth(bfc)) - 1
+ v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFIZ [bfc] x))
- // cond: sc < getARM64BFlsb(bfc)
- // result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFIZ {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < getARM64BFlsb(bfc)) {
+ if !(sc < bfc.getARM64BFlsb()) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
v.AddArg(x)
return true
}
// match: (SRLconst [sc] (UBFIZ [bfc] x))
- // cond: sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- // result: (UBFX [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+ // cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
for {
- sc := v.AuxInt
+ sc := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFIZ {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) {
+ if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
v.AddArg(x)
return true
}
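
The SRLconst-of-UBFIZ cases are interval arithmetic on the inserted field [lsb, lsb+width): the shift amount can land below the field (another UBFIZ), exactly at its base (a plain mask), or inside it (a UBFX). A small numeric check of the "inside" case:

package main

import "fmt"

func main() {
	x := uint64(0xab)
	ubfiz := (x & 0xff) << 8 // UBFIZ with lsb=8, width=8
	// sc=12 lies inside [8,16), so shifting right by 12 equals
	// UBFX with lsb'=12-8=4, width'=8+8-12=4.
	fmt.Printf("%#x %#x\n", ubfiz>>12, (x>>4)&0xf) // 0xa 0xa
}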
if x1.Op != OpARM64SLLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64SUBshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRLconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64SUBshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRAconst {
break
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
break
}
v.reset(OpARM64SUBshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
// match: (SUBshiftLL x (MOVDconst [c]) [d])
// result: (SUBconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64SUBconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SLLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
// match: (SUBshiftRA x (MOVDconst [c]) [d])
// result: (SUBconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64SUBconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRAconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
// match: (SUBshiftRL x (MOVDconst [c]) [d])
// result: (SUBconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64SUBconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
if x1.Op != OpARM64SLLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64TSTshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64TSTshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRAconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64TSTshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
// match: (TSTshiftLL (MOVDconst [c]) x [d])
// result: (TSTconst [c] (SLLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64TSTconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (TSTshiftLL x (MOVDconst [c]) [d])
// result: (TSTconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64TSTconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
// match: (TSTshiftRA (MOVDconst [c]) x [d])
// result: (TSTconst [c] (SRAconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64TSTconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (TSTshiftRA x (MOVDconst [c]) [d])
// result: (TSTconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64TSTconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
// match: (TSTshiftRL (MOVDconst [c]) x [d])
// result: (TSTconst [c] (SRLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64TSTconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (TSTshiftRL x (MOVDconst [c]) [d])
// result: (TSTconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64TSTconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool {
v_0 := v.Args[0]
// match: (UBFIZ [bfc] (SLLconst [sc] x))
- // cond: sc < getARM64BFwidth(bfc)
- // result: (UBFIZ [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x)
+ // cond: sc < bfc.getARM64BFwidth()
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
for {
- bfc := v.AuxInt
+ bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < getARM64BFwidth(bfc)) {
+ if !(sc < bfc.getARM64BFwidth()) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
v.AddArg(x)
return true
}
func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
v_0 := v.Args[0]
// match: (UBFX [bfc] (SRLconst [sc] x))
- // cond: sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64
- // result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x)
+ // cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
for {
- bfc := v.AuxInt
+ bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SRLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64) {
+ if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
v.AddArg(x)
return true
}
// match: (UBFX [bfc] (SLLconst [sc] x))
- // cond: sc == getARM64BFlsb(bfc)
- // result: (ANDconst [1<<uint(getARM64BFwidth(bfc))-1] x)
+ // cond: sc == bfc.getARM64BFlsb()
+ // result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
for {
- bfc := v.AuxInt
+ bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc == getARM64BFlsb(bfc)) {
+ if !(sc == bfc.getARM64BFlsb()) {
break
}
v.reset(OpARM64ANDconst)
- v.AuxInt = 1<<uint(getARM64BFwidth(bfc)) - 1
+ v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
v.AddArg(x)
return true
}
// match: (UBFX [bfc] (SLLconst [sc] x))
- // cond: sc < getARM64BFlsb(bfc)
- // result: (UBFX [armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x)
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
for {
- bfc := v.AuxInt
+ bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc < getARM64BFlsb(bfc)) {
+ if !(sc < bfc.getARM64BFlsb()) {
break
}
v.reset(OpARM64UBFX)
- v.AuxInt = armBFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
v.AddArg(x)
return true
}
// match: (UBFX [bfc] (SLLconst [sc] x))
- // cond: sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)
- // result: (UBFIZ [armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x)
+ // cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
for {
- bfc := v.AuxInt
+ bfc := auxIntToArm64BitField(v.AuxInt)
if v_0.Op != OpARM64SLLconst {
break
}
- sc := v_0.AuxInt
+ sc := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0]
- if !(sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) {
+ if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
break
}
v.reset(OpARM64UBFIZ)
- v.AuxInt = armBFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
v.AddArg(x)
return true
}
if x1.Op != OpARM64SLLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64XORshiftLL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRLconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64XORshiftRL)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
if x1.Op != OpARM64SRAconst {
continue
}
- c := x1.AuxInt
+ c := auxIntToInt64(x1.AuxInt)
y := x1.Args[0]
if !(clobberIfDead(x1)) {
continue
}
v.reset(OpARM64XORshiftRA)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg2(x0, y)
return true
}
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 63 {
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 63 {
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SLL {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 64 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 63 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 64 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64ROR)
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 31 {
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
continue
}
t := v_0_1.Type
- if v_0_1.AuxInt != 31 {
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
continue
}
y := v_0_1.Args[0]
if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
continue
}
- cc := v_1.Aux
+ cc := auxToCCop(v_1.Aux)
_ = v_1.Args[1]
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpARM64SLL {
}
_ = v_1_0_1.Args[1]
v_1_0_1_0 := v_1_0_1.Args[0]
- if v_1_0_1_0.Op != OpARM64MOVDconst || v_1_0_1_0.AuxInt != 32 {
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
continue
}
v_1_0_1_1 := v_1_0_1.Args[1]
- if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || v_1_0_1_1.AuxInt != 31 || y != v_1_0_1_1.Args[0] {
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
continue
}
v_1_1 := v_1.Args[1]
- if v_1_1.Op != OpARM64CMPconst || v_1_1.AuxInt != 64 {
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
continue
}
v_1_1_0 := v_1_1.Args[0]
}
_ = v_1_1_0.Args[1]
v_1_1_0_0 := v_1_1_0.Args[0]
- if v_1_1_0_0.Op != OpARM64MOVDconst || v_1_1_0_0.AuxInt != 32 {
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
continue
}
v_1_1_0_1 := v_1_1_0.Args[1]
- if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || v_1_1_0_1.AuxInt != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
continue
}
v.reset(OpARM64RORW)
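
These four heavily elided CSEL0 patterns are the variable-rotate recognizers: the generic lowering of a variable shift produces a shift guarded by a CMPconst/CSEL0 bounds check, and when the two guarded shifts complement each other modulo 64 (or 32) the whole tree collapses to ROR/RORW. In source this is usually just the masked-shift idiom; a sketch:

// Variable right rotate; on arm64 this should lower to a single ROR via
// the CSEL0 patterns above (math/bits.RotateLeft64 takes the same path).
func rotr(x uint64, k uint) uint64 {
	return x>>(k&63) | x<<(64-k&63)
}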
// match: (XORshiftLL (MOVDconst [c]) x [d])
// result: (XORconst [c] (SLLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64XORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
// match: (XORshiftLL x (MOVDconst [c]) [d])
// result: (XORconst x [int64(uint64(c)<<uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64XORconst)
- v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
v.AddArg(x)
return true
}
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SLLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
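// The auxIntToInt64/int64ToAuxInt/auxIntToArm64BitField/auxToCCop calls used
// on the + lines throughout this file are small adapter functions defined in
// rewrite.go; they exist so AuxInt/Aux accesses are statically typed rather
// than raw int64/interface{} values. A sketch of their shape (see rewrite.go
// for the authoritative definitions):
//
//	func auxIntToInt64(i int64) int64 { return i }
//	func int64ToAuxInt(i int64) int64 { return i }
//	func auxIntToArm64BitField(i int64) arm64BitField { return arm64BitField(i) }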
// match: (XORshiftLL [c] (SRLconst x [64-c]) x)
// result: (RORconst [64-c] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
if x != v_1 {
break
}
v.reset(OpARM64RORconst)
- v.AuxInt = 64 - c
+ v.AuxInt = int64ToAuxInt(64 - c)
v.AddArg(x)
return true
}
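// For 0 < c < 64, x>>(64-c) and x<<c occupy disjoint bit positions, so their
// XOR equals their OR and the pair is exactly a rotate; that is what lets the
// rule above emit RORconst [64-c] (rotate right by 64-c, i.e. left by c).
// Illustrative identity check, not part of the generated file:
//
//	import "math/bits"
//
//	func isRotate(x uint64, c uint) bool { // 0 < c < 64
//		return x>>(64-c)^x<<c == bits.RotateLeft64(x, int(c))
//	}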
// match: (XORshiftLL <t> [c] (UBFX [bfc] x) x)
// cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
// result: (RORWconst [32-c] x)
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64RORWconst)
- v.AuxInt = 32 - c
+ v.AuxInt = int64ToAuxInt(32 - c)
v.AddArg(x)
return true
}
// match: (XORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
// result: (REV16W x)
for {
- if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || v_0.AuxInt != armBFAuxInt(8, 8) {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
break
}
x := v_0.Args[0]
if x != v_1 {
break
}
v.reset(OpARM64REV16W)
v.AddArg(x)
return true
}
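// UBFX [armBFAuxInt(8, 8)] extracts bits 8..15, i.e. x>>8 for a 16-bit x, and
// XORing that with x<<8 touches only the other byte, so the whole pattern is
// a byte swap within the halfword, which is what REV16W performs per 16-bit
// lane. Illustrative model (equivalent to bits.ReverseBytes16):
//
//	func swap16(x uint16) uint16 { return x>>8 ^ x<<8 }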
// match: (XORshiftLL [c] (SRLconst x [64-c]) x2)
// result: (EXTRconst [64-c] x2 x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SRLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
x2 := v_1
v.reset(OpARM64EXTRconst)
- v.AuxInt = 64 - c
+ v.AuxInt = int64ToAuxInt(64 - c)
v.AddArg2(x2, x)
return true
}
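// EXTR Rd, Rn, Rm, #lsb returns 64 bits taken from the concatenation Rn:Rm
// starting at bit lsb, i.e. (hi << (64-lsb)) | (lo >> lsb). With lsb = 64-c
// that is (x2 << c) | (x >> (64-c)), and the XOR in the pattern above is safe
// to treat as OR because the two halves have no overlapping bits.
// Illustrative model with our own names:
//
//	func extr(hi, lo uint64, lsb uint) uint64 { // 0 < lsb < 64
//		return hi<<(64-lsb) | lo>>lsb
//	}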
// match: (XORshiftLL <t> [c] (UBFX [bfc] x) x2)
// cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
// result: (EXTRWconst [32-c] x2 x)
for {
t := v.Type
- c := v.AuxInt
+ c := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64UBFX {
break
}
- bfc := v_0.AuxInt
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
x := v_0.Args[0]
x2 := v_1
if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
break
}
v.reset(OpARM64EXTRWconst)
- v.AuxInt = 32 - c
+ v.AuxInt = int64ToAuxInt(32 - c)
v.AddArg2(x2, x)
return true
}
return false
}
func rewriteValueARM64_OpARM64XORshiftRA(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
// match: (XORshiftRA (MOVDconst [c]) x [d])
// result: (XORconst [c] (SRAconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64XORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (XORshiftRA x (MOVDconst [c]) [d])
// result: (XORconst x [c>>uint64(d)])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64XORconst)
- v.AuxInt = c >> uint64(d)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
v.AddArg(x)
return true
}
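// SRAconst is an arithmetic shift, so the fold above uses Go's signed >>
// directly on the int64 aux value and sign bits shift in from the top:
//
//	var c int64 = -8
//	_ = c >> 2 // -2; a logical shift would instead produce a huge positive value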
// match: (XORshiftRA x (SRAconst x [c]) [d])
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRAconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
return false
}
func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
// match: (XORshiftRL (MOVDconst [c]) x [d])
// result: (XORconst [c] (SRLconst <x.Type> x [d]))
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
if v_0.Op != OpARM64MOVDconst {
break
}
- c := v_0.AuxInt
+ c := auxIntToInt64(v_0.AuxInt)
x := v_1
v.reset(OpARM64XORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
- v0.AuxInt = d
+ v0.AuxInt = int64ToAuxInt(d)
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (XORshiftRL x (MOVDconst [c]) [d])
// result: (XORconst x [int64(uint64(c)>>uint64(d))])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64MOVDconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
v.reset(OpARM64XORconst)
- v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
v.AddArg(x)
return true
}
// match: (XORshiftRL x (SRLconst x [c]) [d])
// cond: c==d
// result: (MOVDconst [0])
for {
- d := v.AuxInt
+ d := auxIntToInt64(v.AuxInt)
x := v_0
if v_1.Op != OpARM64SRLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
if x != v_1.Args[0] || !(c == d) {
break
}
v.reset(OpARM64MOVDconst)
- v.AuxInt = 0
+ v.AuxInt = int64ToAuxInt(0)
return true
}
// match: (XORshiftRL [c] (SLLconst x [64-c]) x)
// result: (RORconst [c] x)
for {
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 64-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
break
}
x := v_0.Args[0]
if x != v_1 {
break
}
v.reset(OpARM64RORconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
// match: (XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x))
// cond: c < 32 && t.Size() == 4
// result: (RORWconst [c] x)
for {
t := v.Type
- c := v.AuxInt
- if v_0.Op != OpARM64SLLconst || v_0.AuxInt != 32-c {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
break
}
x := v_0.Args[0]
if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) {
break
}
v.reset(OpARM64RORWconst)
- v.AuxInt = c
+ v.AuxInt = int64ToAuxInt(c)
v.AddArg(x)
return true
}
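// 32-bit counterpart of the rotate recognition above: after the MOVWUreg
// zero-extension, x<<(32-c) and uint32(x)>>c are disjoint in the low word,
// so their XOR is a 32-bit rotate right by c. Illustrative identity, not
// part of the generated file:
//
//	import "math/bits"
//
//	func isRotate32(x uint32, c uint) bool { // 0 < c < 32
//		return x>>c^x<<(32-c) == bits.RotateLeft32(x, -int(c))
//	}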