return rewriteValueAMD64_OpAMD64SHLQ(v)
case OpAMD64SHLQconst:
return rewriteValueAMD64_OpAMD64SHLQconst(v)
+ case OpAMD64SHLXL:
+ return rewriteValueAMD64_OpAMD64SHLXL(v)
+ case OpAMD64SHLXQ:
+ return rewriteValueAMD64_OpAMD64SHLXQ(v)
case OpAMD64SHRB:
return rewriteValueAMD64_OpAMD64SHRB(v)
case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst(v)
case OpAMD64SHRWconst:
return rewriteValueAMD64_OpAMD64SHRWconst(v)
+ case OpAMD64SHRXL:
+ return rewriteValueAMD64_OpAMD64SHRXL(v)
+ case OpAMD64SHRXQ:
+ return rewriteValueAMD64_OpAMD64SHRXQ(v)
case OpAMD64SUBL:
return rewriteValueAMD64_OpAMD64SUBL(v)
case OpAMD64SUBLconst:
}
break
}
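+	// (SHLXL (MOVLconst [1]) y) is 1<<y, so ANDing x with its complement
+	// clears bit y of x; BTRL does this in a single instruction.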
+ // match: (ANDL (NOTL (SHLXL (MOVLconst [1]) y)) x)
+ // result: (BTRL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64NOTL {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ y := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (ANDL (MOVLconst [c]) x)
// cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
// result: (BTRLconst [int8(log32(^c))] x)
v.AddArg2(x, y)
return true
}
+ // match: (ANDNL x (SHLXL (MOVLconst [1]) y))
+ // result: (BTRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64SHLXL {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64BTRL)
+ v.AddArg2(x, y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
v.AddArg2(x, y)
return true
}
+ // match: (ANDNQ x (SHLXQ (MOVQconst [1]) y))
+ // result: (BTRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64SHLXQ {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64BTRQ)
+ v.AddArg2(x, y)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
}
break
}
+ // match: (ANDQ (NOTQ (SHLXQ (MOVQconst [1]) y)) x)
+ // result: (BTRQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64NOTQ {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ y := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (ANDQ (MOVQconst [c]) x)
// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
// result: (BTRQconst [int8(log64(^c))] x)
v.AddArg2(y, x)
return true
}
+ // match: (BTLconst [0] s:(SHRXQ x y))
+ // result: (BTQ y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRXQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTQ)
+ v.AddArg2(y, x)
+ return true
+ }
// match: (BTLconst [c] (SHRLconst [d] x))
// cond: (c+d)<32
// result: (BTLconst [c+d] x)
v.AddArg2(y, x)
return true
}
+ // match: (BTLconst [0] s:(SHRXL x y))
+ // result: (BTL y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRXL {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTL)
+ v.AddArg2(y, x)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
v.AddArg2(y, x)
return true
}
+ // match: (BTQconst [0] s:(SHRXQ x y))
+ // result: (BTQ y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRXQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTQ)
+ v.AddArg2(y, x)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
}
break
}
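+	// x | (1<<y) sets bit y of x; recognize the BMI2 shift form as BTSL.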
+ // match: (ORL (SHLXL (MOVLconst [1]) y) x)
+ // result: (BTSL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTSL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (ORL (MOVLconst [c]) x)
// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
// result: (BTSLconst [int8(log32(c))] x)
}
break
}
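+	// (x<<y) | ((x>>-y) with the wrap-around term masked out when
+	// y&31 == 0) is the standard expansion of a 32-bit rotate by a
+	// variable amount; collapse the BMI2 shift form back into ROLL
+	// (and RORL for the mirrored pattern).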
+ // match: (ORL (SHLXL x y) (ANDL (SHRXL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
+ // result: (ROLL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRXL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLXL x y) (ANDL (SHRXL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
+ // result: (ROLL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRXL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRXL x y) (ANDL (SHLXL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
+ // result: (RORL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRXL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRXL x y) (ANDL (SHLXL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
+ // result: (RORL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRXL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
// cond: v.Type.Size() == 2
// result: (ROLW x y)
}
break
}
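+	// 16-bit rotates: the same recognition with the count taken mod 16
+	// (ANDconst [15]) and the wrap-around shift by 16-y.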
+ // match: (ORL (SHLXL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
+ // cond: v.Type.Size() == 2
+ // result: (ROLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLXL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
+ // cond: v.Type.Size() == 2
+ // result: (ROLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRW x (ANDQconst y [15])) (SHLXL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
+ // cond: v.Type.Size() == 2
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHRW x (ANDLconst y [15])) (SHLXL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
+ // cond: v.Type.Size() == 2
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
}
break
}
+ // match: (ORL (SHLXL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
+ // cond: v.Type.Size() == 1
+ // result: (ROLB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLXL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
+ // cond: v.Type.Size() == 1
+ // result: (ROLB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLXL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
+ // cond: v.Type.Size() == 1
+ // result: (RORB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLXL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
+ // cond: v.Type.Size() == 1
+ // result: (RORB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLXL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (ORL x x)
// result: x
for {
}
break
}
+ // match: (ORQ (SHLXQ (MOVQconst [1]) y) x)
+ // result: (BTSQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTSQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (ORQ (MOVQconst [c]) x)
// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
// result: (BTSQconst [int8(log64(c))] x)
}
break
}
+ // match: (ORQ (SHLXQ x y) (ANDQ (SHRXQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
+ // result: (ROLQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRXQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHLXQ x y) (ANDQ (SHRXQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
+ // result: (ROLQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRXQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHRXQ x y) (ANDQ (SHLXQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
+ // result: (RORQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRXQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHRXQ x y) (ANDQ (SHLXQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
+ // result: (RORQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRXQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
// match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits)))
// result: (SHRDQ lo hi bits)
for {
}
break
}
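+	// (lo>>bits) | (hi<<(64-bits)) shifts the 128-bit value hi:lo right;
+	// that is exactly SHRDQ (the mirrored pattern is SHLDQ).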
+ // match: (ORQ (SHRXQ lo bits) (SHLXQ hi (NEGQ bits)))
+ // result: (SHRDQ lo hi bits)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRXQ {
+ continue
+ }
+ bits := v_0.Args[1]
+ lo := v_0.Args[0]
+ if v_1.Op != OpAMD64SHLXQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ hi := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64SHRDQ)
+ v.AddArg3(lo, hi, bits)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (SHLXQ lo bits) (SHRXQ hi (NEGQ bits)))
+ // result: (SHLDQ lo hi bits)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ bits := v_0.Args[1]
+ lo := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRXQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ hi := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64SHLDQ)
+ v.AddArg3(lo, hi, bits)
+ return true
+ }
+ break
+ }
// match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
// result: (MOVQconst [c|d])
for {
}
break
}
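+	// TESTing y against 1<<x is a bit test: BTL/BTQ copy bit x of y into
+	// the carry flag, so SETAE gives "bit clear" and SETB "bit set".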
+ // match: (SETEQ (TESTL (SHLXL (MOVLconst [1]) x) y))
+ // result: (SETAE (BTL x y))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTQ (SHLXQ (MOVQconst [1]) x) y))
+ // result: (SETAE (BTQ x y))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
// match: (SETEQ (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETAE (BTLconst [int8(log32(c))] x))
}
break
}
+ // match: (SETEQstore [off] {sym} ptr (TESTL (SHLXL (MOVLconst [1]) x) y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLXQ (MOVQconst [1]) x) y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
}
break
}
+ // match: (SETNE (TESTL (SHLXL (MOVLconst [1]) x) y))
+ // result: (SETB (BTL x y))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTQ (SHLXQ (MOVQconst [1]) x) y))
+ // result: (SETB (BTQ x y))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
// match: (SETNE (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETB (BTLconst [int8(log32(c))] x))
}
break
}
+ // match: (SETNEstore [off] {sym} ptr (TESTL (SHLXL (MOVLconst [1]) x) y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTL x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLXQ (MOVQconst [1]) x) y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
// match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
// cond: isUint32PowerOfTwo(int64(c))
// result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
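+	// On GOAMD64>=v3 (which guarantees BMI2), prefer SHLXL: it takes the
+	// count in any register and leaves the flags untouched. SHLQ, SHRL
+	// and SHRQ get the analogous rewrite below.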
+ // match: (SHLL x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SHLXL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (SHLL x (MOVQconst [c]))
// result: (SHLLconst [int8(c&31)] x)
for {
v.AddArg2(x, v0)
return true
}
- // match: (SHLL l:(MOVLload [off] {sym} ptr mem) x)
- // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
- // result: (SHLXLload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVLload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SHLXLload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ // match: (SHLQ x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SHLXQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (SHLQ x (MOVQconst [c]))
// result: (SHLQconst [int8(c&63)] x)
for {
v.AddArg2(x, v0)
return true
}
- // match: (SHLQ l:(MOVQload [off] {sym} ptr mem) x)
- // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
- // result: (SHLXQload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVQload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SHLXQload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64SHLXL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
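+	// These rules mirror SHLL's: only count&31 matters, so constant
+	// counts fold to SHLLconst, and additions or masks that cannot
+	// change the low 5 bits of the count are dropped.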
+ // match: (SHLXL x (MOVQconst [c]))
+ // result: (SHLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLXL x (MOVLconst [c]))
+ // result: (SHLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLXL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHLXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXL x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHLXL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHLXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXL x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHLXL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHLXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXL x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHLXL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHLXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXL x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHLXL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
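+	// The load-folding rule moved here from SHLL; the GOAMD64 >= 3 check
+	// is no longer needed because SHLXL only exists on v3 targets.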
+ // match: (SHLXL l:(MOVLload [off] {sym} ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (SHLXLload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHLXLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHLXQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHLXQ x (MOVQconst [c]))
+ // result: (SHLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLXQ x (MOVLconst [c]))
+ // result: (SHLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLXQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHLXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXQ x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHLXQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHLXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXQ x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHLXQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHLXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXQ x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHLXQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHLXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLXQ x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHLXQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLXQ l:(MOVQload [off] {sym} ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (SHLXQload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHLXQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ // match: (SHRL x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SHRXL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (SHRL x (MOVQconst [c]))
// result: (SHRLconst [int8(c&31)] x)
for {
v.AddArg2(x, v0)
return true
}
- // match: (SHRL l:(MOVLload [off] {sym} ptr mem) x)
- // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
- // result: (SHRXLload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVLload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SHRXLload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ // match: (SHRQ x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SHRXQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v.AddArg2(x, y)
+ return true
+ }
// match: (SHRQ x (MOVQconst [c]))
// result: (SHRQconst [int8(c&63)] x)
for {
v.AddArg2(x, v0)
return true
}
- // match: (SHRQ l:(MOVQload [off] {sym} ptr mem) x)
- // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
- // result: (SHRXQload [off] {sym} ptr x mem)
- for {
- l := v_0
- if l.Op != OpAMD64MOVQload {
- break
- }
- off := auxIntToInt32(l.AuxInt)
- sym := auxToSym(l.Aux)
- mem := l.Args[1]
- ptr := l.Args[0]
- x := v_1
- if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
- break
- }
- v.reset(OpAMD64SHRXQload)
- v.AuxInt = int32ToAuxInt(off)
- v.Aux = symToAux(sym)
- v.AddArg3(ptr, x, mem)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64SHRXL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHRXL x (MOVQconst [c]))
+ // result: (SHRLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRXL x (MOVLconst [c]))
+ // result: (SHRLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRXL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHRXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXL x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHRXL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHRXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXL x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHRXL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHRXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXL x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHRXL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHRXL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXL x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHRXL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXL l:(MOVLload [off] {sym} ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (SHRXLload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHRXLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRXQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHRXQ x (MOVQconst [c]))
+ // result: (SHRQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRXQ x (MOVLconst [c]))
+ // result: (SHRQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRXQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHRXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXQ x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHRXQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHRXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXQ x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHRXQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHRXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXQ x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHRXQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHRXQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRXQ x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHRXQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRXQ l:(MOVQload [off] {sym} ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (SHRXQload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHRXQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
}
break
}
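+	// x ^ (1<<y) toggles bit y of x; recognize it as BTCL (BTCQ below
+	// for the 64-bit case).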
+ // match: (XORL (SHLXL (MOVLconst [1]) y) x)
+ // result: (BTCL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTCL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (XORL (MOVLconst [c]) x)
// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
// result: (BTCLconst [int8(log32(c))] x)
}
break
}
+ // match: (XORQ (SHLXQ (MOVQconst [1]) y) x)
+ // result: (BTCQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTCQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (XORQ (MOVQconst [c]) x)
// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
// result: (BTCQconst [int8(log64(c))] x)
}
break
}
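+	// The same bit-test recognition as SETEQ/SETNE, applied to the
+	// controls of EQ and NE branches.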
+ // match: (EQ (TESTL (SHLXL (MOVLconst [1]) x) y))
+ // result: (UGE (BTL x y))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ (SHLXQ (MOVQconst [1]) x) y))
+ // result: (UGE (BTQ x y))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
// match: (EQ (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (UGE (BTLconst [int8(log32(c))] x))
}
break
}
+ // match: (NE (TESTL (SHLXL (MOVLconst [1]) x) y))
+ // result: (ULT (BTL x y))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ (SHLXQ (MOVQconst [1]) x) y))
+ // result: (ULT (BTQ x y))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
// match: (NE (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(int64(c))
// result: (ULT (BTLconst [int8(log32(c))] x))