// Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
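+// The *modify rules below fold these patterns into read-modify-write
+// instructions. Note the explicit AND on the bit index: SHL(L|Q) masks its
+// shift count mod 32/64 in hardware, but BT(S|C|R) with a memory operand does
+// not mask the bit offset and can address bits outside the operand, so the
+// rewrite must apply the mask itself to stay equivalent.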
+(ORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) <t> x) mem) =>
+ (BTSLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+(ORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) <t> x) mem) =>
+ (BTSQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+(XORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) <t> x) mem) =>
+ (BTCLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+(XORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) <t> x) mem) =>
+ (BTCQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
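+// For instance, with p of type *uint32, an assignment like
+//	*p |= 1 << (b & 31)
+// may now compile to a single BTSLmodify instead of a load/OR/store sequence.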
// Convert ORconst into BTS when that makes the code smaller; the boundary is
// 128, the first value that no longer fits in a sign-extended imm8
// (ORL $0x40,AX is 3 bytes, ORL $0x80,AX is 6 bytes).
(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
=> (BTRQconst [int8(log64(^c))] x)
(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
=> (BTRLconst [int8(log32(^c))] x)
+(ANDLmodify [off] {sym} ptr (NOTL s:(SHLL (MOVLconst [1]) <t> x)) mem) =>
+ (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+(ANDQmodify [off] {sym} ptr (NOTQ s:(SHLQ (MOVQconst [1]) <t> x)) mem) =>
+ (BTRQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
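+// Similarly, bit clearing such as *p &^= 1 << (b & 63) with p of type *uint64
+// may become a single BTRQmodify with the index masked mod 64.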
// Special-case bit patterns on first/last bit.
// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
-(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
- ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) <t> x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((BTC|BTR|BTS)Lmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
-(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
- ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr y:((BTC|BTR|BTS)Q l:(MOVQload [off] {sym} ptr mem) <t> x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((BTC|BTR|BTS)Qmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
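+// The BT(C|R|S) cases are split out of the combined fold above because only
+// their results need the explicit ANDconst mask on the bit index.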
// Merge ADDQconst and LEAQ into atomic loads.
(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
 (MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem)
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDLmodify [off] {sym} ptr (NOTL s:(SHLL (MOVLconst [1]) <t> x)) mem)
+ // result: (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64NOTL {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64SHLL {
+ break
+ }
+ t := s.Type
+ x := s.Args[1]
+ s_0 := s.Args[0]
+ if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64BTRLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
+ v0.AuxInt = int32ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (ANDLmodify [off1+off2] {sym} base val mem)
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDQmodify [off] {sym} ptr (NOTQ s:(SHLQ (MOVQconst [1]) <t> x)) mem)
+ // result: (BTRQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64NOTQ {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64SHLQ {
+ break
+ }
+ t := s.Type
+ x := s.Args[1]
+ s_0 := s.Args[0]
+ if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64BTRQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
+ v0.AuxInt = int32ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (ANDQmodify [off1+off2] {sym} base val mem)
}
break
}
- // match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) <t> x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
- // result: (BTCLmodify [off] {sym} ptr x mem)
+ // result: (BTCLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if y.Op != OpAMD64BTCL {
break
}
+ t := y.Type
x := y.Args[1]
l := y.Args[0]
if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
v.reset(OpAMD64BTCLmodify)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
+ v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
+ v0.AuxInt = int32ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
return true
}
- // match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) <t> x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
- // result: (BTRLmodify [off] {sym} ptr x mem)
+ // result: (BTRLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if y.Op != OpAMD64BTRL {
break
}
+ t := y.Type
x := y.Args[1]
l := y.Args[0]
if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
v.reset(OpAMD64BTRLmodify)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
+ v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
+ v0.AuxInt = int32ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
return true
}
- // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) <t> x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
- // result: (BTSLmodify [off] {sym} ptr x mem)
+ // result: (BTSLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if y.Op != OpAMD64BTSL {
break
}
+ t := y.Type
x := y.Args[1]
l := y.Args[0]
if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
v.reset(OpAMD64BTSLmodify)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
+ v0 := b.NewValue0(l.Pos, OpAMD64ANDLconst, t)
+ v0.AuxInt = int32ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVQstore [off1+off2] {sym} ptr val mem)
}
break
}
- // match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) <t> x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
- // result: (BTCQmodify [off] {sym} ptr x mem)
+ // result: (BTCQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if y.Op != OpAMD64BTCQ {
break
}
+ t := y.Type
x := y.Args[1]
l := y.Args[0]
if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
v.reset(OpAMD64BTCQmodify)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
+ v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
+ v0.AuxInt = int32ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
return true
}
- // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) <t> x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
- // result: (BTRQmodify [off] {sym} ptr x mem)
+ // result: (BTRQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if y.Op != OpAMD64BTRQ {
break
}
+ t := y.Type
x := y.Args[1]
l := y.Args[0]
if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
v.reset(OpAMD64BTRQmodify)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
+ v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
+ v0.AuxInt = int32ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
return true
}
- // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) <t> x) mem)
// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
- // result: (BTSQmodify [off] {sym} ptr x mem)
+ // result: (BTSQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if y.Op != OpAMD64BTSQ {
break
}
+ t := y.Type
x := y.Args[1]
l := y.Args[0]
if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
v.reset(OpAMD64BTSQmodify)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
+ v0 := b.NewValue0(l.Pos, OpAMD64ANDQconst, t)
+ v0.AuxInt = int32ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
return true
}
// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) <t> x) mem)
+ // result: (BTSLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ s := v_1
+ if s.Op != OpAMD64SHLL {
+ break
+ }
+ t := s.Type
+ x := s.Args[1]
+ s_0 := s.Args[0]
+ if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64BTSLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
+ v0.AuxInt = int32ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
// match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (ORLmodify [off1+off2] {sym} base val mem)
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) <t> x) mem)
+ // result: (BTSQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ s := v_1
+ if s.Op != OpAMD64SHLQ {
+ break
+ }
+ t := s.Type
+ x := s.Args[1]
+ s_0 := s.Args[0]
+ if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64BTSQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
+ v0.AuxInt = int32ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
// match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (ORQmodify [off1+off2] {sym} base val mem)
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORLmodify [off] {sym} ptr s:(SHLL (MOVLconst [1]) <t> x) mem)
+ // result: (BTCLmodify [off] {sym} ptr (ANDLconst <t> [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ s := v_1
+ if s.Op != OpAMD64SHLL {
+ break
+ }
+ t := s.Type
+ x := s.Args[1]
+ s_0 := s.Args[0]
+ if s_0.Op != OpAMD64MOVLconst || auxIntToInt32(s_0.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64BTCLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, t)
+ v0.AuxInt = int32ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
// match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (XORLmodify [off1+off2] {sym} base val mem)
v_2 := v.Args[2]
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORQmodify [off] {sym} ptr s:(SHLQ (MOVQconst [1]) <t> x) mem)
+ // result: (BTCQmodify [off] {sym} ptr (ANDQconst <t> [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ s := v_1
+ if s.Op != OpAMD64SHLQ {
+ break
+ }
+ t := s.Type
+ x := s.Args[1]
+ s_0 := s.Args[0]
+ if s_0.Op != OpAMD64MOVQconst || auxIntToInt64(s_0.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64BTCQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64ANDQconst, t)
+ v0.AuxInt = int32ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
// match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (XORQmodify [off1+off2] {sym} base val mem)