(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SARW x y)
(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SARB x y)
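// For example (an illustrative sketch, not part of this CL): masking the
// shift amount in the source is one way to make shiftIsBounded hold, so the
// shift lowers to a bare SARB with no wraparound fixup:
//
//	func sar8(x int8, s uint) int8 {
//		return x >> (s & 7) // bounded; lowers directly to SARB
//	}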
-// Prefer SARX/SHLX/SHRX instruction because it has less register restriction on the shift input.
-(SAR(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SARX(Q|L) x y)
-(SHL(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SHLX(Q|L) x y)
-(SHR(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SHRX(Q|L) x y)
-
// Lowering integer comparisons
(Less(64|32|16|8) x y) => (SETL (CMP(Q|L|W|B) x y))
(Less(64|32|16|8)U x y) => (SETB (CMP(Q|L|W|B) x y))
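// For example (sketch): signedness alone picks the SETcc variant off the
// same CMPQ:
//
//	func less(a, b int64) bool { return a < b }   // CMPQ b, a; SETL
//	func below(a, b uint64) bool { return a < b } // CMPQ b, a; SETB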
// ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis
// mutandis, for UGE and SETAE, and CC and SETCC.
((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
-((NE|EQ) (TESTL (SHLXL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
-((NE|EQ) (TESTQ (SHLXQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
=> ((ULT|UGE) (BTLconst [int8(log32(c))] x))
((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
=> ((ULT|UGE) (BTQconst [int8(log64(c))] x))
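// For example (sketch): both the variable-index and the constant
// power-of-two tests become bit-test instructions (the &63 below is later
// dropped by the shift-amount rules):
//
//	func bit(x uint64, b uint) bool { return x&(1<<(b&63)) != 0 } // BTQ b, x
//	func bit40(x uint64) bool { return x&(1<<40) != 0 }           // BTQconst $40, x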
(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y))
(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y))
-(SET(NE|EQ) (TESTL (SHLXL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y))
-(SET(NE|EQ) (TESTQ (SHLXQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y))
(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
=> (SET(B|AE) (BTLconst [int8(log32(c))] x))
(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
    => (SET(B|AE) (BTQconst [int8(log64(c))] x))
(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
    => (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
=> (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLXL (MOVLconst [1]) x) y) mem)
- => (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLXQ (MOVQconst [1]) x) y) mem)
- => (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(int64(c))
=> (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c))
    => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
(BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 => (BTQconst [c+d] x)
(BT(Q|L)const [c] (SHLQconst [d] x)) && c>d => (BT(Q|L)const [c-d] x)
(BT(Q|L)const [0] s:(SHRQ x y)) => (BTQ y x)
-(BT(Q|L)const [0] s:(SHRXQ x y)) => (BTQ y x)
(BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 => (BTLconst [c+d] x)
(BTLconst [c] (SHLLconst [d] x)) && c>d => (BTLconst [c-d] x)
-(BTLconst [0] s:(SHR(L|XL) x y)) => (BTL y x)
+(BTLconst [0] s:(SHRL x y)) => (BTL y x)
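// For example (sketch): a bit test of a shifted value folds into a single
// test at the adjusted index:
//
//	func f(x uint64) bool { return (x>>3)&(1<<2) != 0 } // BTQconst $5, x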
// Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
-(OR(Q|L) (SHLX(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
-(XOR(Q|L) (SHLX(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
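// For example (sketch): set and toggle through a variable index (the &63 is
// later dropped by the shift-amount rules below):
//
//	a |= 1 << (b & 63) // BTSQ b, a
//	a ^= 1 << (b & 63) // BTCQ b, a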
// Convert ORconst into BTS, if the code gets smaller, with boundary being
// (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes).
(ORQconst [c] x) && isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
    => (BTSQconst [int8(log32(c))] x)
(ORLconst [c] x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
    => (BTSLconst [int8(log32(c))] x)
// Recognize bit clearing: a &^= 1<<b
(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
(ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
-(AND(Q|L) (NOT(Q|L) (SHLX(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
-(ANDN(Q|L) x (SHLX(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
(ANDQconst [c] x) && isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
=> (BTRQconst [int8(log32(^c))] x)
(ANDLconst [c] x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
    => (BTRLconst [int8(log32(^c))] x)
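// For example (sketch): clearing one bit, by variable or by constant index:
//
//	a &^= 1 << (b & 63) // BTRQ b, a
//	a &^= 1 << 40       // BTRQconst $40, a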
(SHLQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x)
(SHLL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x)
-(SHLXQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x)
-(SHLXL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x)
(SHRQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x)
(SHRL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x)
(SHRW x (MOV(Q|L)const [c])) && c&31 < 16 => (SHRWconst [int8(c&31)] x)
(SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 => (MOVLconst [0])
(SHRB x (MOV(Q|L)const [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
(SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 => (MOVLconst [0])
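// For example (sketch): a constant shift amount reaching these rules folds
// into the *const form:
//
//	func shr5(x uint64) uint64 { return x >> 5 } // SHRQconst $5, x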
-(SHRXQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x)
-(SHRXL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x)
(SARQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x)
(SARL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x)
(SARW x (MOV(Q|L)const [c])) => (SARWconst [int8(min(int64(c)&31,15))] x)
(SARB x (MOV(Q|L)const [c])) => (SARBconst [int8(min(int64(c)&31,7))] x)
-(SARXQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x)
-(SARXL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x)
// Operations which don't affect the low 6/5 bits of the shift amount are NOPs.
-((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (ADDQconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x y)
-((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGQ <t> y))
-((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x y)
-((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGQ <t> y))
-
-((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (ADDQconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x y)
-((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGQ <t> y))
-((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x y)
-((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGQ <t> y))
-
-((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (ADDLconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x y)
-((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGL <t> y))
-((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x y)
-((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ|SHLXQ|SHRXQ|SARXQ) x (NEGL <t> y))
-
-((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (ADDLconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x y)
-((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGL <t> y))
-((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x y)
-((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGL <t> y))
+((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
+((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
+
+((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
+((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
+
+((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
+((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
+
+((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGL <t> y))
+((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGL <t> y))
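// For example (sketch): an explicit mask of a 64-bit shift amount is
// redundant because the CPU masks the count anyway, so the AND is dropped:
//
//	func f(x uint64, s uint) uint64 {
//		return x << (s & 63) // the ANDQconst [63] vanishes; plain SHLQ
//	}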
// rotate left negative = rotate right
(ROLQ x (NEG(Q|L) y)) => (RORQ x y)
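// For example (sketch, requires "math/bits"):
//
//	func ror(x uint64, k int) uint64 {
//		return bits.RotateLeft64(x, -k) // ROLQ x, (NEGQ k) => RORQ x, k
//	}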
&& clobber(x0, x1, sh)
=> @mergePoint(b,x0,x1) (MOVBEQload [i] {s} p1 mem)
-(SARX(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (SARX(Q|L)load [off] {sym} ptr x mem)
-(SHLX(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (SHLX(Q|L)load [off] {sym} ptr x mem)
-(SHRX(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (SHRX(Q|L)load [off] {sym} ptr x mem)
+(SAR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SARX(Q|L)load [off] {sym} ptr x mem)
+(SHL(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHLX(Q|L)load [off] {sym} ptr x mem)
+(SHR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHRX(Q|L)load [off] {sym} ptr x mem)
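// For example (sketch): under GOAMD64=v3, a shift of a freshly loaded value
// can fold the load away, assuming the load has no other uses:
//
//	func f(p *uint64, s uint) uint64 {
//		return *p >> (s & 63) // SHRXQload: the separate MOVQload disappears
//	}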
((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVQconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
return rewriteValueAMD64_OpAMD64SARW(v)
case OpAMD64SARWconst:
return rewriteValueAMD64_OpAMD64SARWconst(v)
- case OpAMD64SARXL:
- return rewriteValueAMD64_OpAMD64SARXL(v)
case OpAMD64SARXLload:
return rewriteValueAMD64_OpAMD64SARXLload(v)
- case OpAMD64SARXQ:
- return rewriteValueAMD64_OpAMD64SARXQ(v)
case OpAMD64SARXQload:
return rewriteValueAMD64_OpAMD64SARXQload(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v)
case OpAMD64SHLQconst:
return rewriteValueAMD64_OpAMD64SHLQconst(v)
- case OpAMD64SHLXL:
- return rewriteValueAMD64_OpAMD64SHLXL(v)
case OpAMD64SHLXLload:
return rewriteValueAMD64_OpAMD64SHLXLload(v)
- case OpAMD64SHLXQ:
- return rewriteValueAMD64_OpAMD64SHLXQ(v)
case OpAMD64SHLXQload:
return rewriteValueAMD64_OpAMD64SHLXQload(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v)
case OpAMD64SHRWconst:
return rewriteValueAMD64_OpAMD64SHRWconst(v)
- case OpAMD64SHRXL:
- return rewriteValueAMD64_OpAMD64SHRXL(v)
case OpAMD64SHRXLload:
return rewriteValueAMD64_OpAMD64SHRXLload(v)
- case OpAMD64SHRXQ:
- return rewriteValueAMD64_OpAMD64SHRXQ(v)
case OpAMD64SHRXQload:
return rewriteValueAMD64_OpAMD64SHRXQload(v)
case OpAMD64SUBL:
}
break
}
- // match: (ANDL (NOTL (SHLXL (MOVLconst [1]) y)) x)
- // result: (BTRL x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64NOTL {
- continue
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64SHLXL {
- continue
- }
- y := v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
- continue
- }
- x := v_1
- v.reset(OpAMD64BTRL)
- v.AddArg2(x, y)
- return true
- }
- break
- }
// match: (ANDL (MOVLconst [c]) x)
// cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
// result: (BTRLconst [int8(log32(^c))] x)
v.AddArg2(x, y)
return true
}
- // match: (ANDNL x (SHLXL (MOVLconst [1]) y))
- // result: (BTRL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64SHLXL {
- break
- }
- y := v_1.Args[1]
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
- break
- }
- v.reset(OpAMD64BTRL)
- v.AddArg2(x, y)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
v.AddArg2(x, y)
return true
}
- // match: (ANDNQ x (SHLXQ (MOVQconst [1]) y))
- // result: (BTRQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64SHLXQ {
- break
- }
- y := v_1.Args[1]
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
- break
- }
- v.reset(OpAMD64BTRQ)
- v.AddArg2(x, y)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
}
break
}
- // match: (ANDQ (NOTQ (SHLXQ (MOVQconst [1]) y)) x)
- // result: (BTRQ x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64NOTQ {
- continue
- }
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64SHLXQ {
- continue
- }
- y := v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- continue
- }
- x := v_1
- v.reset(OpAMD64BTRQ)
- v.AddArg2(x, y)
- return true
- }
- break
- }
// match: (ANDQ (MOVQconst [c]) x)
// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
// result: (BTRQconst [int8(log64(^c))] x)
v.AddArg2(y, x)
return true
}
- // match: (BTLconst [0] s:(SHRXQ x y))
- // result: (BTQ y x)
- for {
- if auxIntToInt8(v.AuxInt) != 0 {
- break
- }
- s := v_0
- if s.Op != OpAMD64SHRXQ {
- break
- }
- y := s.Args[1]
- x := s.Args[0]
- v.reset(OpAMD64BTQ)
- v.AddArg2(y, x)
- return true
- }
// match: (BTLconst [c] (SHRLconst [d] x))
// cond: (c+d)<32
// result: (BTLconst [c+d] x)
v.AddArg2(y, x)
return true
}
- // match: (BTQconst [0] s:(SHRXQ x y))
- // result: (BTQ y x)
- for {
- if auxIntToInt8(v.AuxInt) != 0 {
- break
- }
- s := v_0
- if s.Op != OpAMD64SHRXQ {
- break
- }
- y := s.Args[1]
- x := s.Args[0]
- v.reset(OpAMD64BTQ)
- v.AddArg2(y, x)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
}
break
}
- // match: (ORL (SHLXL (MOVLconst [1]) y) x)
- // result: (BTSL x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLXL {
- continue
- }
- y := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
- continue
- }
- x := v_1
- v.reset(OpAMD64BTSL)
- v.AddArg2(x, y)
- return true
- }
- break
- }
// match: (ORL (MOVLconst [c]) x)
// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
// result: (BTSLconst [int8(log32(c))] x)
}
break
}
- // match: (ORQ (SHLXQ (MOVQconst [1]) y) x)
- // result: (BTSQ x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLXQ {
- continue
- }
- y := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
- continue
- }
- x := v_1
- v.reset(OpAMD64BTSQ)
- v.AddArg2(x, y)
- return true
- }
- break
- }
// match: (ORQ (MOVQconst [c]) x)
// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
// result: (BTSQconst [int8(log64(c))] x)
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (SARL x y)
- // cond: buildcfg.GOAMD64 >= 3
- // result: (SARXL x y)
- for {
- x := v_0
- y := v_1
- if !(buildcfg.GOAMD64 >= 3) {
- break
- }
- v.reset(OpAMD64SARXL)
- v.AddArg2(x, y)
- return true
- }
// match: (SARL x (MOVQconst [c]))
// result: (SARLconst [int8(c&31)] x)
for {
v.AddArg2(x, v0)
return true
}
+ // match: (SARL l:(MOVLload [off] {sym} ptr mem) x)
+ // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
+ // result: (SARXLload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SARXLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (SARQ x y)
- // cond: buildcfg.GOAMD64 >= 3
- // result: (SARXQ x y)
- for {
- x := v_0
- y := v_1
- if !(buildcfg.GOAMD64 >= 3) {
- break
- }
- v.reset(OpAMD64SARXQ)
- v.AddArg2(x, y)
- return true
- }
// match: (SARQ x (MOVQconst [c]))
// result: (SARQconst [int8(c&63)] x)
for {
v.AddArg2(x, v0)
return true
}
+ // match: (SARQ l:(MOVQload [off] {sym} ptr mem) x)
+ // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
+ // result: (SARXQload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SARXQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
}
return false
}
-func rewriteValueAMD64_OpAMD64SARXL(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (SARXL x (MOVQconst [c]))
- // result: (SARLconst [int8(c&31)] x)
- for {
- x := v_0
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := auxIntToInt64(v_1.AuxInt)
- v.reset(OpAMD64SARLconst)
- v.AuxInt = int8ToAuxInt(int8(c & 31))
- v.AddArg(x)
- return true
- }
- // match: (SARXL x (MOVLconst [c]))
- // result: (SARLconst [int8(c&31)] x)
- for {
- x := v_0
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- v.reset(OpAMD64SARLconst)
- v.AuxInt = int8ToAuxInt(int8(c & 31))
- v.AddArg(x)
- return true
- }
- // match: (SARXL x (ADDQconst [c] y))
- // cond: c & 31 == 0
- // result: (SARXL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ADDQconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&31 == 0) {
- break
- }
- v.reset(OpAMD64SARXL)
- v.AddArg2(x, y)
- return true
- }
- // match: (SARXL x (NEGQ <t> (ADDQconst [c] y)))
- // cond: c & 31 == 0
- // result: (SARXL x (NEGQ <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGQ {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDQconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&31 == 0) {
- break
- }
- v.reset(OpAMD64SARXL)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SARXL x (ANDQconst [c] y))
- // cond: c & 31 == 31
- // result: (SARXL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ANDQconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&31 == 31) {
- break
- }
- v.reset(OpAMD64SARXL)
- v.AddArg2(x, y)
- return true
- }
- // match: (SARXL x (NEGQ <t> (ANDQconst [c] y)))
- // cond: c & 31 == 31
- // result: (SARXL x (NEGQ <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGQ {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDQconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&31 == 31) {
- break
- }
- v.reset(OpAMD64SARXL)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SARXL x (ADDLconst [c] y))
- // cond: c & 31 == 0
- // result: (SARXL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ADDLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&31 == 0) {
- break
- }
- v.reset(OpAMD64SARXL)
- v.AddArg2(x, y)
- return true
- }
- // match: (SARXL x (NEGL <t> (ADDLconst [c] y)))
- // cond: c & 31 == 0
- // result: (SARXL x (NEGL <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDLconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&31 == 0) {
- break
- }
- v.reset(OpAMD64SARXL)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SARXL x (ANDLconst [c] y))
- // cond: c & 31 == 31
- // result: (SARXL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ANDLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&31 == 31) {
- break
- }
- v.reset(OpAMD64SARXL)
- v.AddArg2(x, y)
- return true
- }
- // match: (SARXL x (NEGL <t> (ANDLconst [c] y)))
- // cond: c & 31 == 31
- // result: (SARXL x (NEGL <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDLconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&31 == 31) {
- break
- }
- v.reset(OpAMD64SARXL)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SARXL l:(MOVLload [off] {sym} ptr mem) x)
- // cond: canMergeLoad(v, l) && clobber(l)
- // result: (SARXLload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVLload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SARXLload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
}
return false
}
-func rewriteValueAMD64_OpAMD64SARXQ(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (SARXQ x (MOVQconst [c]))
- // result: (SARQconst [int8(c&63)] x)
- for {
- x := v_0
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := auxIntToInt64(v_1.AuxInt)
- v.reset(OpAMD64SARQconst)
- v.AuxInt = int8ToAuxInt(int8(c & 63))
- v.AddArg(x)
- return true
- }
- // match: (SARXQ x (MOVLconst [c]))
- // result: (SARQconst [int8(c&63)] x)
- for {
- x := v_0
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- v.reset(OpAMD64SARQconst)
- v.AuxInt = int8ToAuxInt(int8(c & 63))
- v.AddArg(x)
- return true
- }
- // match: (SARXQ x (ADDQconst [c] y))
- // cond: c & 63 == 0
- // result: (SARXQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ADDQconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&63 == 0) {
- break
- }
- v.reset(OpAMD64SARXQ)
- v.AddArg2(x, y)
- return true
- }
- // match: (SARXQ x (NEGQ <t> (ADDQconst [c] y)))
- // cond: c & 63 == 0
- // result: (SARXQ x (NEGQ <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGQ {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDQconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&63 == 0) {
- break
- }
- v.reset(OpAMD64SARXQ)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SARXQ x (ANDQconst [c] y))
- // cond: c & 63 == 63
- // result: (SARXQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ANDQconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SARXQ)
- v.AddArg2(x, y)
- return true
- }
- // match: (SARXQ x (NEGQ <t> (ANDQconst [c] y)))
- // cond: c & 63 == 63
- // result: (SARXQ x (NEGQ <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGQ {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDQconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SARXQ)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SARXQ x (ADDLconst [c] y))
- // cond: c & 63 == 0
- // result: (SARXQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ADDLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&63 == 0) {
- break
- }
- v.reset(OpAMD64SARXQ)
- v.AddArg2(x, y)
- return true
- }
- // match: (SARXQ x (NEGL <t> (ADDLconst [c] y)))
- // cond: c & 63 == 0
- // result: (SARXQ x (NEGL <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDLconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&63 == 0) {
- break
- }
- v.reset(OpAMD64SARXQ)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SARXQ x (ANDLconst [c] y))
- // cond: c & 63 == 63
- // result: (SARXQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ANDLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SARXQ)
- v.AddArg2(x, y)
- return true
- }
- // match: (SARXQ x (NEGL <t> (ANDLconst [c] y)))
- // cond: c & 63 == 63
- // result: (SARXQ x (NEGL <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDLconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SARXQ)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SARXQ l:(MOVQload [off] {sym} ptr mem) x)
- // cond: canMergeLoad(v, l) && clobber(l)
- // result: (SARXQload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVQload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SARXQload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
}
break
}
- // match: (SETEQ (TESTL (SHLXL (MOVLconst [1]) x) y))
- // result: (SETAE (BTL x y))
- for {
- if v_0.Op != OpAMD64TESTL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpAMD64SHLXL {
- continue
- }
- x := v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
- continue
- }
- y := v_0_1
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg2(x, y)
- v.AddArg(v0)
- return true
- }
- break
- }
- // match: (SETEQ (TESTQ (SHLXQ (MOVQconst [1]) x) y))
- // result: (SETAE (BTQ x y))
- for {
- if v_0.Op != OpAMD64TESTQ {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpAMD64SHLXQ {
- continue
- }
- x := v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- continue
- }
- y := v_0_1
- v.reset(OpAMD64SETAE)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg2(x, y)
- v.AddArg(v0)
- return true
- }
- break
- }
// match: (SETEQ (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETAE (BTLconst [int8(log32(c))] x))
}
break
}
- // match: (SETEQstore [off] {sym} ptr (TESTL (SHLXL (MOVLconst [1]) x) y) mem)
- // result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
- for {
- off := auxIntToInt32(v.AuxInt)
- sym := auxToSym(v.Aux)
- ptr := v_0
- if v_1.Op != OpAMD64TESTL {
- break
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHLXL {
- continue
- }
- x := v_1_0.Args[1]
- v_1_0_0 := v_1_0.Args[0]
- if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
- continue
- }
- y := v_1_1
- mem := v_2
- v.reset(OpAMD64SETAEstore)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg2(x, y)
- v.AddArg3(ptr, v0, mem)
- return true
- }
- break
- }
- // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLXQ (MOVQconst [1]) x) y) mem)
- // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
- for {
- off := auxIntToInt32(v.AuxInt)
- sym := auxToSym(v.Aux)
- ptr := v_0
- if v_1.Op != OpAMD64TESTQ {
- break
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHLXQ {
- continue
- }
- x := v_1_0.Args[1]
- v_1_0_0 := v_1_0.Args[0]
- if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
- continue
- }
- y := v_1_1
- mem := v_2
- v.reset(OpAMD64SETAEstore)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg2(x, y)
- v.AddArg3(ptr, v0, mem)
- return true
- }
- break
- }
// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
}
break
}
- // match: (SETNE (TESTL (SHLXL (MOVLconst [1]) x) y))
- // result: (SETB (BTL x y))
- for {
- if v_0.Op != OpAMD64TESTL {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpAMD64SHLXL {
- continue
- }
- x := v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
- continue
- }
- y := v_0_1
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg2(x, y)
- v.AddArg(v0)
- return true
- }
- break
- }
- // match: (SETNE (TESTQ (SHLXQ (MOVQconst [1]) x) y))
- // result: (SETB (BTQ x y))
- for {
- if v_0.Op != OpAMD64TESTQ {
- break
- }
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpAMD64SHLXQ {
- continue
- }
- x := v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- continue
- }
- y := v_0_1
- v.reset(OpAMD64SETB)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg2(x, y)
- v.AddArg(v0)
- return true
- }
- break
- }
// match: (SETNE (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETB (BTLconst [int8(log32(c))] x))
}
break
}
- // match: (SETNEstore [off] {sym} ptr (TESTL (SHLXL (MOVLconst [1]) x) y) mem)
- // result: (SETBstore [off] {sym} ptr (BTL x y) mem)
- for {
- off := auxIntToInt32(v.AuxInt)
- sym := auxToSym(v.Aux)
- ptr := v_0
- if v_1.Op != OpAMD64TESTL {
- break
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHLXL {
- continue
- }
- x := v_1_0.Args[1]
- v_1_0_0 := v_1_0.Args[0]
- if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
- continue
- }
- y := v_1_1
- mem := v_2
- v.reset(OpAMD64SETBstore)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg2(x, y)
- v.AddArg3(ptr, v0, mem)
- return true
- }
- break
- }
- // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLXQ (MOVQconst [1]) x) y) mem)
- // result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
- for {
- off := auxIntToInt32(v.AuxInt)
- sym := auxToSym(v.Aux)
- ptr := v_0
- if v_1.Op != OpAMD64TESTQ {
- break
- }
- _ = v_1.Args[1]
- v_1_0 := v_1.Args[0]
- v_1_1 := v_1.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
- if v_1_0.Op != OpAMD64SHLXQ {
- continue
- }
- x := v_1_0.Args[1]
- v_1_0_0 := v_1_0.Args[0]
- if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
- continue
- }
- y := v_1_1
- mem := v_2
- v.reset(OpAMD64SETBstore)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg2(x, y)
- v.AddArg3(ptr, v0, mem)
- return true
- }
- break
- }
// match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (SHLL x y)
- // cond: buildcfg.GOAMD64 >= 3
- // result: (SHLXL x y)
- for {
- x := v_0
- y := v_1
- if !(buildcfg.GOAMD64 >= 3) {
- break
- }
- v.reset(OpAMD64SHLXL)
- v.AddArg2(x, y)
- return true
- }
// match: (SHLL x (MOVQconst [c]))
// result: (SHLLconst [int8(c&31)] x)
for {
v.AddArg2(x, v0)
return true
}
+ // match: (SHLL l:(MOVLload [off] {sym} ptr mem) x)
+ // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
+ // result: (SHLXLload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHLXLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (SHLQ x y)
- // cond: buildcfg.GOAMD64 >= 3
- // result: (SHLXQ x y)
- for {
- x := v_0
- y := v_1
- if !(buildcfg.GOAMD64 >= 3) {
- break
- }
- v.reset(OpAMD64SHLXQ)
- v.AddArg2(x, y)
- return true
- }
// match: (SHLQ x (MOVQconst [c]))
// result: (SHLQconst [int8(c&63)] x)
for {
v.AddArg2(x, v0)
return true
}
+ // match: (SHLQ l:(MOVQload [off] {sym} ptr mem) x)
+ // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
+ // result: (SHLXQload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHLXQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
}
return false
}
-func rewriteValueAMD64_OpAMD64SHLXL(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (SHLXL x (MOVQconst [c]))
- // result: (SHLLconst [int8(c&31)] x)
- for {
- x := v_0
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := auxIntToInt64(v_1.AuxInt)
- v.reset(OpAMD64SHLLconst)
- v.AuxInt = int8ToAuxInt(int8(c & 31))
- v.AddArg(x)
- return true
- }
- // match: (SHLXL x (MOVLconst [c]))
- // result: (SHLLconst [int8(c&31)] x)
- for {
- x := v_0
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- v.reset(OpAMD64SHLLconst)
- v.AuxInt = int8ToAuxInt(int8(c & 31))
- v.AddArg(x)
- return true
- }
- // match: (SHLXL x (ADDQconst [c] y))
- // cond: c & 31 == 0
- // result: (SHLXL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ADDQconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&31 == 0) {
- break
- }
- v.reset(OpAMD64SHLXL)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHLXL x (NEGQ <t> (ADDQconst [c] y)))
- // cond: c & 31 == 0
- // result: (SHLXL x (NEGQ <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGQ {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDQconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&31 == 0) {
- break
- }
- v.reset(OpAMD64SHLXL)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHLXL x (ANDQconst [c] y))
- // cond: c & 31 == 31
- // result: (SHLXL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ANDQconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&31 == 31) {
- break
- }
- v.reset(OpAMD64SHLXL)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHLXL x (NEGQ <t> (ANDQconst [c] y)))
- // cond: c & 31 == 31
- // result: (SHLXL x (NEGQ <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGQ {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDQconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&31 == 31) {
- break
- }
- v.reset(OpAMD64SHLXL)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHLXL x (ADDLconst [c] y))
- // cond: c & 31 == 0
- // result: (SHLXL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ADDLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&31 == 0) {
- break
- }
- v.reset(OpAMD64SHLXL)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHLXL x (NEGL <t> (ADDLconst [c] y)))
- // cond: c & 31 == 0
- // result: (SHLXL x (NEGL <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDLconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&31 == 0) {
- break
- }
- v.reset(OpAMD64SHLXL)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHLXL x (ANDLconst [c] y))
- // cond: c & 31 == 31
- // result: (SHLXL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ANDLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&31 == 31) {
- break
- }
- v.reset(OpAMD64SHLXL)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHLXL x (NEGL <t> (ANDLconst [c] y)))
- // cond: c & 31 == 31
- // result: (SHLXL x (NEGL <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDLconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&31 == 31) {
- break
- }
- v.reset(OpAMD64SHLXL)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHLXL l:(MOVLload [off] {sym} ptr mem) x)
- // cond: canMergeLoad(v, l) && clobber(l)
- // result: (SHLXLload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVLload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SHLXLload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
}
return false
}
-func rewriteValueAMD64_OpAMD64SHLXQ(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (SHLXQ x (MOVQconst [c]))
- // result: (SHLQconst [int8(c&63)] x)
- for {
- x := v_0
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := auxIntToInt64(v_1.AuxInt)
- v.reset(OpAMD64SHLQconst)
- v.AuxInt = int8ToAuxInt(int8(c & 63))
- v.AddArg(x)
- return true
- }
- // match: (SHLXQ x (MOVLconst [c]))
- // result: (SHLQconst [int8(c&63)] x)
- for {
- x := v_0
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- v.reset(OpAMD64SHLQconst)
- v.AuxInt = int8ToAuxInt(int8(c & 63))
- v.AddArg(x)
- return true
- }
- // match: (SHLXQ x (ADDQconst [c] y))
- // cond: c & 63 == 0
- // result: (SHLXQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ADDQconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&63 == 0) {
- break
- }
- v.reset(OpAMD64SHLXQ)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHLXQ x (NEGQ <t> (ADDQconst [c] y)))
- // cond: c & 63 == 0
- // result: (SHLXQ x (NEGQ <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGQ {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDQconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&63 == 0) {
- break
- }
- v.reset(OpAMD64SHLXQ)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHLXQ x (ANDQconst [c] y))
- // cond: c & 63 == 63
- // result: (SHLXQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ANDQconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SHLXQ)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHLXQ x (NEGQ <t> (ANDQconst [c] y)))
- // cond: c & 63 == 63
- // result: (SHLXQ x (NEGQ <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGQ {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDQconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SHLXQ)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHLXQ x (ADDLconst [c] y))
- // cond: c & 63 == 0
- // result: (SHLXQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ADDLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&63 == 0) {
- break
- }
- v.reset(OpAMD64SHLXQ)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHLXQ x (NEGL <t> (ADDLconst [c] y)))
- // cond: c & 63 == 0
- // result: (SHLXQ x (NEGL <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDLconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&63 == 0) {
- break
- }
- v.reset(OpAMD64SHLXQ)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHLXQ x (ANDLconst [c] y))
- // cond: c & 63 == 63
- // result: (SHLXQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ANDLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SHLXQ)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHLXQ x (NEGL <t> (ANDLconst [c] y)))
- // cond: c & 63 == 63
- // result: (SHLXQ x (NEGL <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDLconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SHLXQ)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHLXQ l:(MOVQload [off] {sym} ptr mem) x)
- // cond: canMergeLoad(v, l) && clobber(l)
- // result: (SHLXQload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVQload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SHLXQload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (SHRL x y)
- // cond: buildcfg.GOAMD64 >= 3
- // result: (SHRXL x y)
- for {
- x := v_0
- y := v_1
- if !(buildcfg.GOAMD64 >= 3) {
- break
- }
- v.reset(OpAMD64SHRXL)
- v.AddArg2(x, y)
- return true
- }
// match: (SHRL x (MOVQconst [c]))
// result: (SHRLconst [int8(c&31)] x)
for {
v.AddArg2(x, v0)
return true
}
+ // match: (SHRL l:(MOVLload [off] {sym} ptr mem) x)
+ // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
+ // result: (SHRXLload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHRXLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (SHRQ x y)
- // cond: buildcfg.GOAMD64 >= 3
- // result: (SHRXQ x y)
- for {
- x := v_0
- y := v_1
- if !(buildcfg.GOAMD64 >= 3) {
- break
- }
- v.reset(OpAMD64SHRXQ)
- v.AddArg2(x, y)
- return true
- }
// match: (SHRQ x (MOVQconst [c]))
// result: (SHRQconst [int8(c&63)] x)
for {
v.AddArg2(x, v0)
return true
}
+ // match: (SHRQ l:(MOVQload [off] {sym} ptr mem) x)
+ // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
+ // result: (SHRXQload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHRXQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRXL(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (SHRXL x (MOVQconst [c]))
- // result: (SHRLconst [int8(c&31)] x)
- for {
- x := v_0
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := auxIntToInt64(v_1.AuxInt)
- v.reset(OpAMD64SHRLconst)
- v.AuxInt = int8ToAuxInt(int8(c & 31))
- v.AddArg(x)
- return true
- }
- // match: (SHRXL x (MOVLconst [c]))
- // result: (SHRLconst [int8(c&31)] x)
- for {
- x := v_0
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- v.reset(OpAMD64SHRLconst)
- v.AuxInt = int8ToAuxInt(int8(c & 31))
- v.AddArg(x)
- return true
- }
- // match: (SHRXL x (ADDQconst [c] y))
- // cond: c & 31 == 0
- // result: (SHRXL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ADDQconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&31 == 0) {
- break
- }
- v.reset(OpAMD64SHRXL)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHRXL x (NEGQ <t> (ADDQconst [c] y)))
- // cond: c & 31 == 0
- // result: (SHRXL x (NEGQ <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGQ {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDQconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&31 == 0) {
- break
- }
- v.reset(OpAMD64SHRXL)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHRXL x (ANDQconst [c] y))
- // cond: c & 31 == 31
- // result: (SHRXL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ANDQconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&31 == 31) {
- break
- }
- v.reset(OpAMD64SHRXL)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHRXL x (NEGQ <t> (ANDQconst [c] y)))
- // cond: c & 31 == 31
- // result: (SHRXL x (NEGQ <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGQ {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDQconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&31 == 31) {
- break
- }
- v.reset(OpAMD64SHRXL)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHRXL x (ADDLconst [c] y))
- // cond: c & 31 == 0
- // result: (SHRXL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ADDLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&31 == 0) {
- break
- }
- v.reset(OpAMD64SHRXL)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHRXL x (NEGL <t> (ADDLconst [c] y)))
- // cond: c & 31 == 0
- // result: (SHRXL x (NEGL <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDLconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&31 == 0) {
- break
- }
- v.reset(OpAMD64SHRXL)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHRXL x (ANDLconst [c] y))
- // cond: c & 31 == 31
- // result: (SHRXL x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ANDLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&31 == 31) {
- break
- }
- v.reset(OpAMD64SHRXL)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHRXL x (NEGL <t> (ANDLconst [c] y)))
- // cond: c & 31 == 31
- // result: (SHRXL x (NEGL <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDLconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&31 == 31) {
- break
- }
- v.reset(OpAMD64SHRXL)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHRXL l:(MOVLload [off] {sym} ptr mem) x)
- // cond: canMergeLoad(v, l) && clobber(l)
- // result: (SHRXLload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVLload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SHRXLload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
}
return false
}
-func rewriteValueAMD64_OpAMD64SHRXQ(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- b := v.Block
- // match: (SHRXQ x (MOVQconst [c]))
- // result: (SHRQconst [int8(c&63)] x)
- for {
- x := v_0
- if v_1.Op != OpAMD64MOVQconst {
- break
- }
- c := auxIntToInt64(v_1.AuxInt)
- v.reset(OpAMD64SHRQconst)
- v.AuxInt = int8ToAuxInt(int8(c & 63))
- v.AddArg(x)
- return true
- }
- // match: (SHRXQ x (MOVLconst [c]))
- // result: (SHRQconst [int8(c&63)] x)
- for {
- x := v_0
- if v_1.Op != OpAMD64MOVLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- v.reset(OpAMD64SHRQconst)
- v.AuxInt = int8ToAuxInt(int8(c & 63))
- v.AddArg(x)
- return true
- }
- // match: (SHRXQ x (ADDQconst [c] y))
- // cond: c & 63 == 0
- // result: (SHRXQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ADDQconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&63 == 0) {
- break
- }
- v.reset(OpAMD64SHRXQ)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHRXQ x (NEGQ <t> (ADDQconst [c] y)))
- // cond: c & 63 == 0
- // result: (SHRXQ x (NEGQ <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGQ {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDQconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&63 == 0) {
- break
- }
- v.reset(OpAMD64SHRXQ)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHRXQ x (ANDQconst [c] y))
- // cond: c & 63 == 63
- // result: (SHRXQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ANDQconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SHRXQ)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHRXQ x (NEGQ <t> (ANDQconst [c] y)))
- // cond: c & 63 == 63
- // result: (SHRXQ x (NEGQ <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGQ {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDQconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SHRXQ)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHRXQ x (ADDLconst [c] y))
- // cond: c & 63 == 0
- // result: (SHRXQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ADDLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&63 == 0) {
- break
- }
- v.reset(OpAMD64SHRXQ)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHRXQ x (NEGL <t> (ADDLconst [c] y)))
- // cond: c & 63 == 0
- // result: (SHRXQ x (NEGL <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ADDLconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&63 == 0) {
- break
- }
- v.reset(OpAMD64SHRXQ)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHRXQ x (ANDLconst [c] y))
- // cond: c & 63 == 63
- // result: (SHRXQ x y)
- for {
- x := v_0
- if v_1.Op != OpAMD64ANDLconst {
- break
- }
- c := auxIntToInt32(v_1.AuxInt)
- y := v_1.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SHRXQ)
- v.AddArg2(x, y)
- return true
- }
- // match: (SHRXQ x (NEGL <t> (ANDLconst [c] y)))
- // cond: c & 63 == 63
- // result: (SHRXQ x (NEGL <t> y))
- for {
- x := v_0
- if v_1.Op != OpAMD64NEGL {
- break
- }
- t := v_1.Type
- v_1_0 := v_1.Args[0]
- if v_1_0.Op != OpAMD64ANDLconst {
- break
- }
- c := auxIntToInt32(v_1_0.AuxInt)
- y := v_1_0.Args[0]
- if !(c&63 == 63) {
- break
- }
- v.reset(OpAMD64SHRXQ)
- v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
- v0.AddArg(y)
- v.AddArg2(x, v0)
- return true
- }
- // match: (SHRXQ l:(MOVQload [off] {sym} ptr mem) x)
- // cond: canMergeLoad(v, l) && clobber(l)
- // result: (SHRXQload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVQload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SHRXQload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
}
break
}
- // match: (XORL (SHLXL (MOVLconst [1]) y) x)
- // result: (BTCL x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLXL {
- continue
- }
- y := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
- continue
- }
- x := v_1
- v.reset(OpAMD64BTCL)
- v.AddArg2(x, y)
- return true
- }
- break
- }
// match: (XORL (MOVLconst [c]) x)
// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
// result: (BTCLconst [int8(log32(c))] x)
}
break
}
- // match: (XORQ (SHLXQ (MOVQconst [1]) y) x)
- // result: (BTCQ x y)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- if v_0.Op != OpAMD64SHLXQ {
- continue
- }
- y := v_0.Args[1]
- v_0_0 := v_0.Args[0]
- if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
- continue
- }
- x := v_1
- v.reset(OpAMD64BTCQ)
- v.AddArg2(x, y)
- return true
- }
- break
- }
// match: (XORQ (MOVQconst [c]) x)
// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
// result: (BTCQconst [int8(log64(c))] x)
}
break
}
- // match: (EQ (TESTL (SHLXL (MOVLconst [1]) x) y))
- // result: (UGE (BTL x y))
- for b.Controls[0].Op == OpAMD64TESTL {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpAMD64SHLXL {
- continue
- }
- x := v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
- continue
- }
- y := v_0_1
- v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockAMD64UGE, v0)
- return true
- }
- break
- }
- // match: (EQ (TESTQ (SHLXQ (MOVQconst [1]) x) y))
- // result: (UGE (BTQ x y))
- for b.Controls[0].Op == OpAMD64TESTQ {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpAMD64SHLXQ {
- continue
- }
- x := v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- continue
- }
- y := v_0_1
- v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockAMD64UGE, v0)
- return true
- }
- break
- }
// match: (EQ (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (UGE (BTLconst [int8(log32(c))] x))
}
break
}
- // match: (NE (TESTL (SHLXL (MOVLconst [1]) x) y))
- // result: (ULT (BTL x y))
- for b.Controls[0].Op == OpAMD64TESTL {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpAMD64SHLXL {
- continue
- }
- x := v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
- continue
- }
- y := v_0_1
- v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockAMD64ULT, v0)
- return true
- }
- break
- }
- // match: (NE (TESTQ (SHLXQ (MOVQconst [1]) x) y))
- // result: (ULT (BTQ x y))
- for b.Controls[0].Op == OpAMD64TESTQ {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- if v_0_0.Op != OpAMD64SHLXQ {
- continue
- }
- x := v_0_0.Args[1]
- v_0_0_0 := v_0_0.Args[0]
- if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
- continue
- }
- y := v_0_1
- v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockAMD64ULT, v0)
- return true
- }
- break
- }
// match: (NE (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (ULT (BTLconst [int8(log32(c))] x))