p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt
- case ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
- ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst:
+ case ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst:
+ op := v.Op
+ if op == ssa.OpAMD64BTQconst && v.AuxInt < 32 {
+ // Emit 32-bit version because it's shorter
+ op = ssa.OpAMD64BTLconst
+ }
+ p := s.Prog(op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+ case ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
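
The new BT case above folds the narrowing into instruction emission: when the constant bit index is below 32, BTL tests the same bit as BTQ but typically encodes one byte shorter because it needs no REX.W prefix. A minimal standalone sketch of that decision, assuming nothing from the compiler internals (chooseBT is a hypothetical helper and the mnemonic strings are illustrative only):

// bt_choice_sketch.go - standalone illustration, not compiler code.
package main

import "fmt"

// chooseBT mirrors the op selection in the BT case above: a constant bit
// index below 32 can use the 32-bit BTL form, which tests the same bit as
// BTQ but has a shorter encoding (no REX.W prefix needed).
func chooseBT(bitIndex int64) string {
	if bitIndex < 32 {
		return "BTL"
	}
	return "BTQ"
}

func main() {
	for _, c := range []int64{0, 31, 32, 63} {
		fmt.Printf("bit index %2d -> %s\n", c, chooseBT(c))
	}
}
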
(XORLconst [1] (SETBE x)) -> (SETA x)
(XORLconst [1] (SETA x)) -> (SETBE x)
-// Convert BTQconst to BTLconst if possible. It has a shorter encoding.
-(BTQconst [c] x) && c < 32 -> (BTLconst [c] x)
-
// Special case for floating point - LF/LEF not generated
(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) -> (UGT cmp yes no)
(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE cmp yes no)
return rewriteValueAMD64_OpAMD64ANDQmem_0(v)
case OpAMD64BSFQ:
return rewriteValueAMD64_OpAMD64BSFQ_0(v)
- case OpAMD64BTQconst:
- return rewriteValueAMD64_OpAMD64BTQconst_0(v)
case OpAMD64CMOVLCC:
return rewriteValueAMD64_OpAMD64CMOVLCC_0(v)
case OpAMD64CMOVLCS:
}
return false
}
-func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool {
- // match: (BTQconst [c] x)
- // cond: c < 32
- // result: (BTLconst [c] x)
- for {
- c := v.AuxInt
- x := v.Args[0]
- if !(c < 32) {
- break
- }
- v.reset(OpAMD64BTLconst)
- v.AuxInt = c
- v.AddArg(x)
- return true
- }
- return false
-}
func rewriteValueAMD64_OpAMD64CMOVLCC_0(v *Value) bool {
// match: (CMOVLCC x y (InvertFlags cond))
// cond: