return rewriteValueAMD64_OpAMD64ANDQmem_0(v)
case OpAMD64BSFQ:
return rewriteValueAMD64_OpAMD64BSFQ_0(v)
+ case OpAMD64BTLconst:
+ return rewriteValueAMD64_OpAMD64BTLconst_0(v)
+ case OpAMD64BTQconst:
+ return rewriteValueAMD64_OpAMD64BTQconst_0(v)
+ case OpAMD64BTRLconst:
+ return rewriteValueAMD64_OpAMD64BTRLconst_0(v)
+ case OpAMD64BTRQconst:
+ return rewriteValueAMD64_OpAMD64BTRQconst_0(v)
+ case OpAMD64BTSLconst:
+ return rewriteValueAMD64_OpAMD64BTSLconst_0(v)
+ case OpAMD64BTSQconst:
+ return rewriteValueAMD64_OpAMD64BTSQconst_0(v)
case OpAMD64CMOVLCC:
return rewriteValueAMD64_OpAMD64CMOVLCC_0(v)
case OpAMD64CMOVLCS:
case OpAMD64SETBmem:
return rewriteValueAMD64_OpAMD64SETBmem_0(v)
case OpAMD64SETEQ:
- return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v)
+ return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v) || rewriteValueAMD64_OpAMD64SETEQ_20(v)
case OpAMD64SETEQmem:
- return rewriteValueAMD64_OpAMD64SETEQmem_0(v) || rewriteValueAMD64_OpAMD64SETEQmem_10(v)
+ return rewriteValueAMD64_OpAMD64SETEQmem_0(v) || rewriteValueAMD64_OpAMD64SETEQmem_10(v) || rewriteValueAMD64_OpAMD64SETEQmem_20(v)
case OpAMD64SETG:
return rewriteValueAMD64_OpAMD64SETG_0(v)
case OpAMD64SETGE:
case OpAMD64SETLmem:
return rewriteValueAMD64_OpAMD64SETLmem_0(v)
case OpAMD64SETNE:
- return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v)
+ return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v) || rewriteValueAMD64_OpAMD64SETNE_20(v)
case OpAMD64SETNEmem:
- return rewriteValueAMD64_OpAMD64SETNEmem_0(v) || rewriteValueAMD64_OpAMD64SETNEmem_10(v)
+ return rewriteValueAMD64_OpAMD64SETNEmem_0(v) || rewriteValueAMD64_OpAMD64SETNEmem_10(v) || rewriteValueAMD64_OpAMD64SETNEmem_20(v)
case OpAMD64SHLL:
return rewriteValueAMD64_OpAMD64SHLL_0(v)
case OpAMD64SHLLconst:
case OpAMD64XORLmem:
return rewriteValueAMD64_OpAMD64XORLmem_0(v)
case OpAMD64XORQ:
- return rewriteValueAMD64_OpAMD64XORQ_0(v)
+ return rewriteValueAMD64_OpAMD64XORQ_0(v) || rewriteValueAMD64_OpAMD64XORQ_10(v)
case OpAMD64XORQconst:
return rewriteValueAMD64_OpAMD64XORQconst_0(v)
case OpAMD64XORQmem:
return false
}
func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
+ // cond: !config.nacl
+ // result: (BTRL x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64NOTL {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLL {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ if v_0_0_0.AuxInt != 1 {
+ break
+ }
+ y := v_0_0.Args[1]
+ x := v.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ANDL x (NOTL (SHLL (MOVLconst [1]) y)))
+ // cond: !config.nacl
+ // result: (BTRL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64NOTL {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64SHLL {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ if v_1_0_0.AuxInt != 1 {
+ break
+ }
+ y := v_1_0.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ANDL (MOVLconst [c]) x)
+ // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+ // result: (BTRLconst [log2uint32(^c)] x)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = log2uint32(^c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDL x (MOVLconst [c]))
+ // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+ // result: (BTRLconst [log2uint32(^c)] x)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = log2uint32(^c)
+ v.AddArg(x)
+ return true
+ }
// match: (ANDL x (MOVLconst [c]))
// cond:
// result: (ANDLconst [c] x)
return false
}
func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ANDLconst [c] x)
+ // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+ // result: (BTRLconst [log2uint32(^c)] x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = log2uint32(^c)
+ v.AddArg(x)
+ return true
+ }
// match: (ANDLconst [c] (ANDLconst [d] x))
// cond:
// result: (ANDLconst [c & d] x)
return false
}
func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
+ // cond: !config.nacl
+ // result: (BTRQ x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64NOTQ {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLQ {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ if v_0_0_0.AuxInt != 1 {
+ break
+ }
+ y := v_0_0.Args[1]
+ x := v.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ANDQ x (NOTQ (SHLQ (MOVQconst [1]) y)))
+ // cond: !config.nacl
+ // result: (BTRQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64NOTQ {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64SHLQ {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ if v_1_0_0.AuxInt != 1 {
+ break
+ }
+ y := v_1_0.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ANDQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+ // result: (BTRQconst [log2(^c)] x)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = log2(^c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQ x (MOVQconst [c]))
+ // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+ // result: (BTRQconst [log2(^c)] x)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = log2(^c)
+ v.AddArg(x)
+ return true
+ }
// match: (ANDQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (ANDQconst [c] x)
return false
}
func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ANDQconst [c] x)
+ // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+ // result: (BTRQconst [log2(^c)] x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = log2(^c)
+ v.AddArg(x)
+ return true
+ }
// match: (ANDQconst [c] (ANDQconst [d] x))
// cond:
// result: (ANDQconst [c & d] x)
return false
}
+func rewriteValueAMD64_OpAMD64BTLconst_0(v *Value) bool {
+ // match: (BTLconst [c] (SHRQconst [d] x))
+ // cond: (c+d)<64
+ // result: (BTQconst [c+d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !((c + d) < 64) {
+ break
+ }
+ v.reset(OpAMD64BTQconst)
+ v.AuxInt = c + d
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [c] (SHLQconst [d] x))
+ // cond: c>d
+ // result: (BTLconst [c-d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c > d) {
+ break
+ }
+ v.reset(OpAMD64BTLconst)
+ v.AuxInt = c - d
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [0] s:(SHRQ x y))
+ // cond:
+ // result: (BTQ y x)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ s := v.Args[0]
+ if s.Op != OpAMD64SHRQ {
+ break
+ }
+ _ = s.Args[1]
+ x := s.Args[0]
+ y := s.Args[1]
+ v.reset(OpAMD64BTQ)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [c] (SHRLconst [d] x))
+ // cond: (c+d)<32
+ // result: (BTLconst [c+d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !((c + d) < 32) {
+ break
+ }
+ v.reset(OpAMD64BTLconst)
+ v.AuxInt = c + d
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [c] (SHLLconst [d] x))
+ // cond: c>d
+ // result: (BTLconst [c-d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c > d) {
+ break
+ }
+ v.reset(OpAMD64BTLconst)
+ v.AuxInt = c - d
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [0] s:(SHRL x y))
+ // cond:
+ // result: (BTL y x)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ s := v.Args[0]
+ if s.Op != OpAMD64SHRL {
+ break
+ }
+ _ = s.Args[1]
+ x := s.Args[0]
+ y := s.Args[1]
+ v.reset(OpAMD64BTL)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool {
+ // match: (BTQconst [c] (SHRQconst [d] x))
+ // cond: (c+d)<64
+ // result: (BTQconst [c+d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !((c + d) < 64) {
+ break
+ }
+ v.reset(OpAMD64BTQconst)
+ v.AuxInt = c + d
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTQconst [c] (SHLQconst [d] x))
+ // cond: c>d
+ // result: (BTQconst [c-d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c > d) {
+ break
+ }
+ v.reset(OpAMD64BTQconst)
+ v.AuxInt = c - d
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTQconst [0] s:(SHRQ x y))
+ // cond:
+ // result: (BTQ y x)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ s := v.Args[0]
+ if s.Op != OpAMD64SHRQ {
+ break
+ }
+ _ = s.Args[1]
+ x := s.Args[0]
+ y := s.Args[1]
+ v.reset(OpAMD64BTQ)
+ v.AddArg(y)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRLconst_0(v *Value) bool {
+ // match: (BTRLconst [c] (BTSLconst [c] x))
+ // cond:
+ // result: (BTRLconst [c] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTSLconst {
+ break
+ }
+ if v_0.AuxInt != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRLconst [c] (BTCLconst [c] x))
+ // cond:
+ // result: (BTRLconst [c] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTCLconst {
+ break
+ }
+ if v_0.AuxInt != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRQconst_0(v *Value) bool {
+ // match: (BTRQconst [c] (BTSQconst [c] x))
+ // cond:
+ // result: (BTRQconst [c] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTSQconst {
+ break
+ }
+ if v_0.AuxInt != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (BTCQconst [c] x))
+ // cond:
+ // result: (BTRQconst [c] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTCQconst {
+ break
+ }
+ if v_0.AuxInt != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSLconst_0(v *Value) bool {
+ // match: (BTSLconst [c] (BTRLconst [c] x))
+ // cond:
+ // result: (BTSLconst [c] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTRLconst {
+ break
+ }
+ if v_0.AuxInt != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSLconst [c] (BTCLconst [c] x))
+ // cond:
+ // result: (BTSLconst [c] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTCLconst {
+ break
+ }
+ if v_0.AuxInt != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSQconst_0(v *Value) bool {
+ // match: (BTSQconst [c] (BTRQconst [c] x))
+ // cond:
+ // result: (BTSQconst [c] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTRQconst {
+ break
+ }
+ if v_0.AuxInt != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (BTCQconst [c] x))
+ // cond:
+ // result: (BTSQconst [c] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTCQconst {
+ break
+ }
+ if v_0.AuxInt != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64CMOVLCC_0(v *Value) bool {
// match: (CMOVLCC x y (InvertFlags cond))
// cond:
return false
}
func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ORL (SHLL (MOVLconst [1]) y) x)
+ // cond: !config.nacl
+ // result: (BTSL x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ if v_0_0.AuxInt != 1 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTSL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ORL x (SHLL (MOVLconst [1]) y))
+ // cond: !config.nacl
+ // result: (BTSL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ if v_1_0.AuxInt != 1 {
+ break
+ }
+ y := v_1.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTSL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ORL (MOVLconst [c]) x)
+ // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+ // result: (BTSLconst [log2uint32(c)] x)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = log2uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORL x (MOVLconst [c]))
+ // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+ // result: (BTSLconst [log2uint32(c)] x)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = log2uint32(c)
+ v.AddArg(x)
+ return true
+ }
// match: (ORL x (MOVLconst [c]))
// cond:
// result: (ORLconst [c] x)
v.AddArg(x)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool {
// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
// cond: d==8-c && c < 8 && t.Size() == 1
// result: (ROLBconst x [c])
v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool {
// match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y))
// cond:
// result: (ROLL x y)
v.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool {
// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
// cond:
// result: (RORL x y)
v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool {
// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
// cond:
// result: (RORL x y)
v.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
// match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15])))
// cond: v.Type.Size() == 2
// result: (ROLW x y)
v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
// match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15])))
// cond: v.Type.Size() == 2
// result: (ROLW x y)
v.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
v.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
// cond: v.Type.Size() == 1
// result: (RORB x y)
v0.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
v0.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
v0.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
v0.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
v0.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
v0.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
v0.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
v0.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
v0.AddArg(v1)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
v0.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
v0.AddArg(v1)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
v0.AddArg(v1)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
v0.AddArg(v1)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
v0.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
v0.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
v0.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool {
// match: (ORL l:(MOVLload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
// result: (ORLmem x [off] {sym} ptr mem)
return false
}
func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ORLconst [c] x)
+ // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+ // result: (BTSLconst [log2uint32(c)] x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = log2uint32(c)
+ v.AddArg(x)
+ return true
+ }
// match: (ORLconst [c] x)
// cond: int32(c)==0
// result: x
return false
}
func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ORQ (SHLQ (MOVQconst [1]) y) x)
+ // cond: !config.nacl
+ // result: (BTSQ x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ if v_0_0.AuxInt != 1 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTSQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ORQ x (SHLQ (MOVQconst [1]) y))
+ // cond: !config.nacl
+ // result: (BTSQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ if v_1_0.AuxInt != 1 {
+ break
+ }
+ y := v_1.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTSQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ORQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+ // result: (BTSQconst [log2(c)] x)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORQ x (MOVQconst [c]))
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+ // result: (BTSQconst [log2(c)] x)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
// match: (ORQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (ORQconst [c] x)
v.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool {
// match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y))
// cond:
// result: (ROLQ x y)
v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool {
// match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y))
// cond:
// result: (ROLQ x y)
v.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
// cond:
// result: (RORQ x y)
v.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ x x)
// cond:
// result: x
v0.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem))
// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
v0.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
v0.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
v0.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
v0.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
v0.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem)
v0.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem)
v0.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
v0.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
v0.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y)
v0.AddArg(y)
return true
}
- // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
- // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
- for {
- _ = v.Args[1]
- s1 := v.Args[0]
- if s1.Op != OpAMD64SHLQconst {
- break
- }
- j1 := s1.AuxInt
- x1 := s1.Args[0]
- if x1.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i1 := x1.AuxInt
- s := x1.Aux
- _ = x1.Args[2]
- idx := x1.Args[0]
- p := x1.Args[1]
- mem := x1.Args[2]
- or := v.Args[1]
- if or.Op != OpAMD64ORQ {
- break
- }
- _ = or.Args[1]
- s0 := or.Args[0]
- if s0.Op != OpAMD64SHLQconst {
- break
- }
- j0 := s0.AuxInt
- x0 := s0.Args[0]
- if x0.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i0 := x0.AuxInt
- if x0.Aux != s {
- break
- }
- _ = x0.Args[2]
- if p != x0.Args[0] {
- break
- }
- if idx != x0.Args[1] {
- break
- }
- if mem != x0.Args[2] {
- break
- }
- y := or.Args[1]
- if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
- break
- }
- b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
- v2.AuxInt = i0
- v2.Aux = s
- v2.AddArg(p)
- v2.AddArg(idx)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
- return true
- }
- // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
- // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
- for {
- _ = v.Args[1]
- s1 := v.Args[0]
- if s1.Op != OpAMD64SHLQconst {
- break
- }
- j1 := s1.AuxInt
- x1 := s1.Args[0]
- if x1.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i1 := x1.AuxInt
- s := x1.Aux
- _ = x1.Args[2]
- p := x1.Args[0]
- idx := x1.Args[1]
- mem := x1.Args[2]
- or := v.Args[1]
- if or.Op != OpAMD64ORQ {
- break
- }
- _ = or.Args[1]
- s0 := or.Args[0]
- if s0.Op != OpAMD64SHLQconst {
- break
- }
- j0 := s0.AuxInt
- x0 := s0.Args[0]
- if x0.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i0 := x0.AuxInt
- if x0.Aux != s {
- break
- }
- _ = x0.Args[2]
- if idx != x0.Args[0] {
- break
- }
- if p != x0.Args[1] {
- break
- }
- if mem != x0.Args[2] {
- break
- }
- y := or.Args[1]
- if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
- break
- }
- b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
- v2.AuxInt = i0
- v2.Aux = s
- v2.AddArg(p)
- v2.AddArg(idx)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
- return true
- }
- // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
- // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
- for {
- _ = v.Args[1]
- s1 := v.Args[0]
- if s1.Op != OpAMD64SHLQconst {
- break
- }
- j1 := s1.AuxInt
- x1 := s1.Args[0]
- if x1.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i1 := x1.AuxInt
- s := x1.Aux
- _ = x1.Args[2]
- idx := x1.Args[0]
- p := x1.Args[1]
- mem := x1.Args[2]
- or := v.Args[1]
- if or.Op != OpAMD64ORQ {
- break
- }
- _ = or.Args[1]
- s0 := or.Args[0]
- if s0.Op != OpAMD64SHLQconst {
- break
- }
- j0 := s0.AuxInt
- x0 := s0.Args[0]
- if x0.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i0 := x0.AuxInt
- if x0.Aux != s {
- break
- }
- _ = x0.Args[2]
- if idx != x0.Args[0] {
- break
- }
- if p != x0.Args[1] {
- break
- }
- if mem != x0.Args[2] {
- break
- }
- y := or.Args[1]
- if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
- break
- }
- b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
- v2.AuxInt = i0
- v2.Aux = s
- v2.AddArg(p)
- v2.AddArg(idx)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
- return true
- }
- // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
- // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
- for {
- _ = v.Args[1]
- s1 := v.Args[0]
- if s1.Op != OpAMD64SHLQconst {
- break
- }
- j1 := s1.AuxInt
- x1 := s1.Args[0]
- if x1.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i1 := x1.AuxInt
- s := x1.Aux
- _ = x1.Args[2]
- p := x1.Args[0]
- idx := x1.Args[1]
- mem := x1.Args[2]
- or := v.Args[1]
- if or.Op != OpAMD64ORQ {
- break
- }
- _ = or.Args[1]
- y := or.Args[0]
- s0 := or.Args[1]
- if s0.Op != OpAMD64SHLQconst {
- break
- }
- j0 := s0.AuxInt
- x0 := s0.Args[0]
- if x0.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i0 := x0.AuxInt
- if x0.Aux != s {
- break
- }
- _ = x0.Args[2]
- if p != x0.Args[0] {
- break
- }
- if idx != x0.Args[1] {
- break
- }
- if mem != x0.Args[2] {
- break
- }
- if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
- break
- }
- b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
- v2.AuxInt = i0
- v2.Aux = s
- v2.AddArg(p)
- v2.AddArg(idx)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
- return true
- }
return false
}
func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
_ = b
typ := &b.Func.Config.Types
_ = typ
- // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
for {
break
}
_ = or.Args[1]
- y := or.Args[0]
- s0 := or.Args[1]
+ s0 := or.Args[0]
if s0.Op != OpAMD64SHLQconst {
break
}
if mem != x0.Args[2] {
break
}
+ y := or.Args[1]
if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
break
}
v0.AddArg(y)
return true
}
- // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
for {
break
}
_ = or.Args[1]
- y := or.Args[0]
- s0 := or.Args[1]
+ s0 := or.Args[0]
if s0.Op != OpAMD64SHLQconst {
break
}
if mem != x0.Args[2] {
break
}
+ y := or.Args[1]
if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
break
}
v0.AddArg(y)
return true
}
- // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
for {
break
}
_ = or.Args[1]
- y := or.Args[0]
- s0 := or.Args[1]
+ s0 := or.Args[0]
if s0.Op != OpAMD64SHLQconst {
break
}
if mem != x0.Args[2] {
break
}
+ y := or.Args[1]
if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
break
}
v0.AddArg(y)
return true
}
- // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
for {
_ = v.Args[1]
- or := v.Args[0]
- if or.Op != OpAMD64ORQ {
- break
- }
- _ = or.Args[1]
- s0 := or.Args[0]
- if s0.Op != OpAMD64SHLQconst {
- break
- }
- j0 := s0.AuxInt
- x0 := s0.Args[0]
- if x0.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i0 := x0.AuxInt
- s := x0.Aux
- _ = x0.Args[2]
- p := x0.Args[0]
- idx := x0.Args[1]
- mem := x0.Args[2]
- y := or.Args[1]
- s1 := v.Args[1]
+ s1 := v.Args[0]
if s1.Op != OpAMD64SHLQconst {
break
}
break
}
i1 := x1.AuxInt
- if x1.Aux != s {
- break
- }
+ s := x1.Aux
_ = x1.Args[2]
- if p != x1.Args[0] {
- break
- }
- if idx != x1.Args[1] {
- break
- }
- if mem != x1.Args[2] {
- break
- }
- if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
- break
- }
- b = mergePoint(b, x0, x1)
- v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
- v.reset(OpCopy)
- v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
- v1.AuxInt = j0
- v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
- v2.AuxInt = i0
- v2.Aux = s
- v2.AddArg(p)
- v2.AddArg(idx)
- v2.AddArg(mem)
- v1.AddArg(v2)
- v0.AddArg(v1)
- v0.AddArg(y)
- return true
- }
- // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
- // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
- // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
- for {
- _ = v.Args[1]
- or := v.Args[0]
+ p := x1.Args[0]
+ idx := x1.Args[1]
+ mem := x1.Args[2]
+ or := v.Args[1]
if or.Op != OpAMD64ORQ {
break
}
_ = or.Args[1]
- s0 := or.Args[0]
+ y := or.Args[0]
+ s0 := or.Args[1]
if s0.Op != OpAMD64SHLQconst {
break
}
break
}
i0 := x0.AuxInt
- s := x0.Aux
- _ = x0.Args[2]
- idx := x0.Args[0]
- p := x0.Args[1]
- mem := x0.Args[2]
- y := or.Args[1]
- s1 := v.Args[1]
- if s1.Op != OpAMD64SHLQconst {
- break
- }
- j1 := s1.AuxInt
- x1 := s1.Args[0]
- if x1.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i1 := x1.AuxInt
- if x1.Aux != s {
+ if x0.Aux != s {
break
}
- _ = x1.Args[2]
- if p != x1.Args[0] {
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
break
}
- if idx != x1.Args[1] {
+ if idx != x0.Args[1] {
break
}
- if mem != x1.Args[2] {
+ if mem != x0.Args[2] {
break
}
if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
v0.AddArg(y)
return true
}
- // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
for {
_ = v.Args[1]
- or := v.Args[0]
+ s1 := v.Args[0]
+ if s1.Op != OpAMD64SHLQconst {
+ break
+ }
+ j1 := s1.AuxInt
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i1 := x1.AuxInt
+ s := x1.Aux
+ _ = x1.Args[2]
+ idx := x1.Args[0]
+ p := x1.Args[1]
+ mem := x1.Args[2]
+ or := v.Args[1]
if or.Op != OpAMD64ORQ {
break
}
break
}
i0 := x0.AuxInt
- s := x0.Aux
- _ = x0.Args[2]
- p := x0.Args[0]
- idx := x0.Args[1]
- mem := x0.Args[2]
- s1 := v.Args[1]
- if s1.Op != OpAMD64SHLQconst {
- break
- }
- j1 := s1.AuxInt
- x1 := s1.Args[0]
- if x1.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i1 := x1.AuxInt
- if x1.Aux != s {
+ if x0.Aux != s {
break
}
- _ = x1.Args[2]
- if p != x1.Args[0] {
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
break
}
- if idx != x1.Args[1] {
+ if idx != x0.Args[1] {
break
}
- if mem != x1.Args[2] {
+ if mem != x0.Args[2] {
break
}
if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
v0.AddArg(y)
return true
}
- // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
for {
_ = v.Args[1]
- or := v.Args[0]
+ s1 := v.Args[0]
+ if s1.Op != OpAMD64SHLQconst {
+ break
+ }
+ j1 := s1.AuxInt
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i1 := x1.AuxInt
+ s := x1.Aux
+ _ = x1.Args[2]
+ p := x1.Args[0]
+ idx := x1.Args[1]
+ mem := x1.Args[2]
+ or := v.Args[1]
if or.Op != OpAMD64ORQ {
break
}
break
}
i0 := x0.AuxInt
- s := x0.Aux
- _ = x0.Args[2]
- idx := x0.Args[0]
- p := x0.Args[1]
- mem := x0.Args[2]
- s1 := v.Args[1]
- if s1.Op != OpAMD64SHLQconst {
- break
- }
- j1 := s1.AuxInt
- x1 := s1.Args[0]
- if x1.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i1 := x1.AuxInt
- if x1.Aux != s {
+ if x0.Aux != s {
break
}
- _ = x1.Args[2]
- if p != x1.Args[0] {
+ _ = x0.Args[2]
+ if idx != x0.Args[0] {
break
}
- if idx != x1.Args[1] {
+ if p != x0.Args[1] {
break
}
- if mem != x1.Args[2] {
+ if mem != x0.Args[2] {
break
}
if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
v0.AddArg(y)
return true
}
- // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
for {
_ = v.Args[1]
- or := v.Args[0]
+ s1 := v.Args[0]
+ if s1.Op != OpAMD64SHLQconst {
+ break
+ }
+ j1 := s1.AuxInt
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i1 := x1.AuxInt
+ s := x1.Aux
+ _ = x1.Args[2]
+ idx := x1.Args[0]
+ p := x1.Args[1]
+ mem := x1.Args[2]
+ or := v.Args[1]
if or.Op != OpAMD64ORQ {
break
}
_ = or.Args[1]
- s0 := or.Args[0]
+ y := or.Args[0]
+ s0 := or.Args[1]
if s0.Op != OpAMD64SHLQconst {
break
}
break
}
i0 := x0.AuxInt
- s := x0.Aux
- _ = x0.Args[2]
- p := x0.Args[0]
- idx := x0.Args[1]
- mem := x0.Args[2]
- y := or.Args[1]
- s1 := v.Args[1]
- if s1.Op != OpAMD64SHLQconst {
- break
- }
- j1 := s1.AuxInt
- x1 := s1.Args[0]
- if x1.Op != OpAMD64MOVWloadidx1 {
- break
- }
- i1 := x1.AuxInt
- if x1.Aux != s {
+ if x0.Aux != s {
break
}
- _ = x1.Args[2]
- if idx != x1.Args[0] {
+ _ = x0.Args[2]
+ if idx != x0.Args[0] {
break
}
- if p != x1.Args[1] {
+ if p != x0.Args[1] {
break
}
- if mem != x1.Args[2] {
+ if mem != x0.Args[2] {
break
}
if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
v0.AddArg(y)
return true
}
- // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
+ // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
for {
i0 := x0.AuxInt
s := x0.Aux
_ = x0.Args[2]
- idx := x0.Args[0]
- p := x0.Args[1]
+ p := x0.Args[0]
+ idx := x0.Args[1]
mem := x0.Args[2]
y := or.Args[1]
s1 := v.Args[1]
break
}
_ = x1.Args[2]
- if idx != x1.Args[0] {
+ if p != x1.Args[0] {
break
}
- if p != x1.Args[1] {
+ if idx != x1.Args[1] {
break
}
if mem != x1.Args[2] {
v0.AddArg(y)
return true
}
- // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
+ // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
+ // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
+ for {
+ _ = v.Args[1]
+ or := v.Args[0]
+ if or.Op != OpAMD64ORQ {
+ break
+ }
+ _ = or.Args[1]
+ s0 := or.Args[0]
+ if s0.Op != OpAMD64SHLQconst {
+ break
+ }
+ j0 := s0.AuxInt
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i0 := x0.AuxInt
+ s := x0.Aux
+ _ = x0.Args[2]
+ idx := x0.Args[0]
+ p := x0.Args[1]
+ mem := x0.Args[2]
+ y := or.Args[1]
+ s1 := v.Args[1]
+ if s1.Op != OpAMD64SHLQconst {
+ break
+ }
+ j1 := s1.AuxInt
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i1 := x1.AuxInt
+ if x1.Aux != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ if idx != x1.Args[1] {
+ break
+ }
+ if mem != x1.Args[2] {
+ break
+ }
+ if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = j0
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ v2.AuxInt = i0
+ v2.Aux = s
+ v2.AddArg(p)
+ v2.AddArg(idx)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ return true
+ }
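+	// The MOVWloadidx1 rules in this group all encode the same merge: two
+	// adjacent 16-bit indexed loads at i0 and i0+2, shifted into consecutive
+	// 16-bit lanes (j1 == j0+16, j0%32 == 0), become a single 32-bit
+	// MOVLloadidx1. The neighbouring variants differ only in p/idx and
+	// argument order, since ORQ and the two index operands are commutative.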
+ // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
for {
break
}
_ = x1.Args[2]
- if idx != x1.Args[0] {
+ if p != x1.Args[0] {
break
}
- if p != x1.Args[1] {
+ if idx != x1.Args[1] {
break
}
if mem != x1.Args[2] {
_ = b
typ := &b.Func.Config.Types
_ = typ
+ // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
+ // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
+ for {
+ _ = v.Args[1]
+ or := v.Args[0]
+ if or.Op != OpAMD64ORQ {
+ break
+ }
+ _ = or.Args[1]
+ y := or.Args[0]
+ s0 := or.Args[1]
+ if s0.Op != OpAMD64SHLQconst {
+ break
+ }
+ j0 := s0.AuxInt
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i0 := x0.AuxInt
+ s := x0.Aux
+ _ = x0.Args[2]
+ idx := x0.Args[0]
+ p := x0.Args[1]
+ mem := x0.Args[2]
+ s1 := v.Args[1]
+ if s1.Op != OpAMD64SHLQconst {
+ break
+ }
+ j1 := s1.AuxInt
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i1 := x1.AuxInt
+ if x1.Aux != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ if idx != x1.Args[1] {
+ break
+ }
+ if mem != x1.Args[2] {
+ break
+ }
+ if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = j0
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ v2.AuxInt = i0
+ v2.Aux = s
+ v2.AddArg(p)
+ v2.AddArg(idx)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ return true
+ }
+ // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
+ // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
+ for {
+ _ = v.Args[1]
+ or := v.Args[0]
+ if or.Op != OpAMD64ORQ {
+ break
+ }
+ _ = or.Args[1]
+ s0 := or.Args[0]
+ if s0.Op != OpAMD64SHLQconst {
+ break
+ }
+ j0 := s0.AuxInt
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i0 := x0.AuxInt
+ s := x0.Aux
+ _ = x0.Args[2]
+ p := x0.Args[0]
+ idx := x0.Args[1]
+ mem := x0.Args[2]
+ y := or.Args[1]
+ s1 := v.Args[1]
+ if s1.Op != OpAMD64SHLQconst {
+ break
+ }
+ j1 := s1.AuxInt
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i1 := x1.AuxInt
+ if x1.Aux != s {
+ break
+ }
+ _ = x1.Args[2]
+ if idx != x1.Args[0] {
+ break
+ }
+ if p != x1.Args[1] {
+ break
+ }
+ if mem != x1.Args[2] {
+ break
+ }
+ if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = j0
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ v2.AuxInt = i0
+ v2.Aux = s
+ v2.AddArg(p)
+ v2.AddArg(idx)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ return true
+ }
+ // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
+ // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
+ for {
+ _ = v.Args[1]
+ or := v.Args[0]
+ if or.Op != OpAMD64ORQ {
+ break
+ }
+ _ = or.Args[1]
+ s0 := or.Args[0]
+ if s0.Op != OpAMD64SHLQconst {
+ break
+ }
+ j0 := s0.AuxInt
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i0 := x0.AuxInt
+ s := x0.Aux
+ _ = x0.Args[2]
+ idx := x0.Args[0]
+ p := x0.Args[1]
+ mem := x0.Args[2]
+ y := or.Args[1]
+ s1 := v.Args[1]
+ if s1.Op != OpAMD64SHLQconst {
+ break
+ }
+ j1 := s1.AuxInt
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i1 := x1.AuxInt
+ if x1.Aux != s {
+ break
+ }
+ _ = x1.Args[2]
+ if idx != x1.Args[0] {
+ break
+ }
+ if p != x1.Args[1] {
+ break
+ }
+ if mem != x1.Args[2] {
+ break
+ }
+ if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = j0
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ v2.AuxInt = i0
+ v2.Aux = s
+ v2.AddArg(p)
+ v2.AddArg(idx)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ return true
+ }
+ // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
+ // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
+ // result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
+ for {
+ _ = v.Args[1]
+ or := v.Args[0]
+ if or.Op != OpAMD64ORQ {
+ break
+ }
+ _ = or.Args[1]
+ y := or.Args[0]
+ s0 := or.Args[1]
+ if s0.Op != OpAMD64SHLQconst {
+ break
+ }
+ j0 := s0.AuxInt
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i0 := x0.AuxInt
+ s := x0.Aux
+ _ = x0.Args[2]
+ p := x0.Args[0]
+ idx := x0.Args[1]
+ mem := x0.Args[2]
+ s1 := v.Args[1]
+ if s1.Op != OpAMD64SHLQconst {
+ break
+ }
+ j1 := s1.AuxInt
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVWloadidx1 {
+ break
+ }
+ i1 := x1.AuxInt
+ if x1.Aux != s {
+ break
+ }
+ _ = x1.Args[2]
+ if idx != x1.Args[0] {
+ break
+ }
+ if p != x1.Args[1] {
+ break
+ }
+ if mem != x1.Args[2] {
+ break
+ }
+ if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = j0
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ v2.AuxInt = i0
+ v2.Aux = s
+ v2.AddArg(p)
+ v2.AddArg(idx)
+ v2.AddArg(mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v0.AddArg(y)
+ return true
+ }
// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
// cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y)
v0.AddArg(v1)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)))
// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
v0.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
v0.AddArg(v1)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
v0.AddArg(v1)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 [i0] {s} p idx mem))
v0.AddArg(v1)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
v0.AddArg(v1)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
v0.AddArg(v1)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 [i0] {s} p idx mem))
v0.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
v0.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
v0.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
v0.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
v0.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
v0.AddArg(y)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
v0.AddArg(y)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
// cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
return false
}
func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (ORQconst [c] x)
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+ // result: (BTSQconst [log2(c)] x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
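+	// Setting a single high bit with OR is rewritten to BTSQ (bit test and
+	// set). The >= 128 bound presumably keeps small constants on the plain
+	// ORQconst form, which still fits in a sign-extended 8-bit immediate.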
// match: (ORQconst [0] x)
// cond:
// result: x
v.AddArg(v0)
return true
}
+ // match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAE (BTQconst [63] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ z1 := v_0.Args[0]
+ if z1.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_0.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
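+	// (SHRQconst [63] x) followed by (SHLQconst [63]) isolates bit 63 of x,
+	// so TESTing that value against itself checks a single bit. BTQconst
+	// loads the selected bit into CF, and SETAE (CF == 0) is the
+	// equal-to-zero form of the original SETEQ.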
+ // match: (SETEQ (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAE (BTQconst [63] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ z2 := v_0.Args[0]
+ z1 := v_0.Args[1]
+ if z1.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAE (BTQconst [31] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ z1 := v_0.Args[0]
+ if z1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_0.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
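+	// Here the shifts mix widths: (SHRQconst [31] x) then (SHLLconst [31])
+	// leaves exactly bit 31 of the 64-bit x in the low 32 bits, so the test
+	// collapses to BTQconst [31] on the original x.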
+ // match: (SETEQ (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAE (BTQconst [31] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ z2 := v_0.Args[0]
+ z1 := v_0.Args[1]
+ if z1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAE (BTQconst [0] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ z1 := v_0.Args[0]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_0.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAE (BTQconst [0] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ z2 := v_0.Args[0]
+ z1 := v_0.Args[1]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAE (BTLconst [0] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ z1 := v_0.Args[0]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_0.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAE (BTLconst [0] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ z2 := v_0.Args[0]
+ z1 := v_0.Args[1]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAE (BTQconst [63] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ z1 := v_0.Args[0]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ x := z1.Args[0]
+ z2 := v_0.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
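+	// An unsigned shift right by 63 leaves only the sign bit, so comparing
+	// the shifted value with zero is again a single-bit test of bit 63.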
+ // match: (SETEQ (TESTQ z2 z1:(SHRQconst [63] x)))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAE (BTQconst [63] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ z2 := v_0.Args[0]
+ z1 := v_0.Args[1]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ x := z1.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAE (BTLconst [31] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ z1 := v_0.Args[0]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ x := z1.Args[0]
+ z2 := v_0.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTL z2 z1:(SHRLconst [31] x)))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAE (BTLconst [31] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ z2 := v_0.Args[0]
+ z1 := v_0.Args[1]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ x := z1.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETEQ_20(v *Value) bool {
// match: (SETEQ (InvertFlags x))
// cond:
// result: (SETEQ x)
v.AuxInt = 1
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
// match: (SETEQ (FlagLT_ULT))
// cond:
// result: (MOVLconst [0])
v.AddArg(mem)
return true
}
+ // match: (SETEQmem [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ z1 := v_1.Args[0]
+ if z1.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_1.Args[1]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
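+	// The SETEQmem rules mirror the SETEQ rules above: the same bit-test
+	// patterns are recognized under the flag-storing form, producing
+	// SETAEmem with the off/sym/ptr/mem operands carried through unchanged.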
+ // match: (SETEQmem [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ z2 := v_1.Args[0]
+ z1 := v_1.Args[1]
+ if z1.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETEQmem_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (SETEQmem [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ z1 := v_1.Args[0]
+ if z1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_1.Args[1]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETEQmem [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ z2 := v_1.Args[0]
+ z1 := v_1.Args[1]
+ if z1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETEQmem [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTQconst [0] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ z1 := v_1.Args[0]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_1.Args[1]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETEQmem [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTQconst [0] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ z2 := v_1.Args[0]
+ z1 := v_1.Args[1]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETEQmem [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTLconst [0] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ z1 := v_1.Args[0]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_1.Args[1]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETEQmem [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTLconst [0] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ z2 := v_1.Args[0]
+ z1 := v_1.Args[1]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETEQmem [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ z1 := v_1.Args[0]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ x := z1.Args[0]
+ z2 := v_1.Args[1]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETEQmem [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ z2 := v_1.Args[0]
+ z1 := v_1.Args[1]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ x := z1.Args[0]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETEQmem [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ z1 := v_1.Args[0]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ x := z1.Args[0]
+ z2 := v_1.Args[1]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETEQmem [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETAEmem [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ z2 := v_1.Args[0]
+ z1 := v_1.Args[1]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ x := z1.Args[0]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETAEmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETEQmem_20(v *Value) bool {
+ b := v.Block
+ _ = b
// match: (SETEQmem [off] {sym} ptr (InvertFlags x) mem)
// cond:
// result: (SETEQmem [off] {sym} ptr x mem)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SETEQmem_10(v *Value) bool {
- b := v.Block
- _ = b
// match: (SETEQmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (SETEQmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
v.AddArg(v0)
return true
}
+ // match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETB (BTQconst [63] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ z1 := v_0.Args[0]
+ if z1.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_0.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETB (BTQconst [63] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ z2 := v_0.Args[0]
+ z1 := v_0.Args[1]
+ if z1.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
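+	// SETNE is the dual of the SETEQ rules above: after BTQconst/BTLconst the
+	// tested bit lands in CF, so "bit set" is SETB (CF == 1) rather than SETAE.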
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETB (BTQconst [31] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ z1 := v_0.Args[0]
+ if z1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_0.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETB (BTQconst [31] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ z2 := v_0.Args[0]
+ z1 := v_0.Args[1]
+ if z1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETB (BTQconst [0] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ z1 := v_0.Args[0]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_0.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETB (BTQconst [0] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ z2 := v_0.Args[0]
+ z1 := v_0.Args[1]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETB (BTLconst [0] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ z1 := v_0.Args[0]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_0.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETB (BTLconst [0] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ z2 := v_0.Args[0]
+ z1 := v_0.Args[1]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETB (BTQconst [63] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ z1 := v_0.Args[0]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ x := z1.Args[0]
+ z2 := v_0.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQ z2 z1:(SHRQconst [63] x)))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETB (BTQconst [63] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ z2 := v_0.Args[0]
+ z1 := v_0.Args[1]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ x := z1.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETB (BTLconst [31] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ z1 := v_0.Args[0]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ x := z1.Args[0]
+ z2 := v_0.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTL z2 z1:(SHRLconst [31] x)))
+ // cond: z1==z2 && !config.nacl
+ // result: (SETB (BTLconst [31] x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ z2 := v_0.Args[0]
+ z1 := v_0.Args[1]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ x := z1.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETNE_20(v *Value) bool {
// match: (SETNE (InvertFlags x))
// cond:
// result: (SETNE x)
v.AuxInt = 0
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool {
// match: (SETNE (FlagLT_ULT))
// cond:
// result: (MOVLconst [1])
v.AddArg(mem)
return true
}
+ // match: (SETNEmem [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ z1 := v_1.Args[0]
+ if z1.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_1.Args[1]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ z2 := v_1.Args[0]
+ z1 := v_1.Args[1]
+ if z1.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETNEmem_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ // match: (SETNEmem [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ z1 := v_1.Args[0]
+ if z1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_1.Args[1]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ z2 := v_1.Args[0]
+ z1 := v_1.Args[1]
+ if z1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTQconst [0] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ z1 := v_1.Args[0]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_1.Args[1]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTQconst [0] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ z2 := v_1.Args[0]
+ z1 := v_1.Args[1]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTLconst [0] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ z1 := v_1.Args[0]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v_1.Args[1]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTLconst [0] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ z2 := v_1.Args[0]
+ z1 := v_1.Args[1]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ z1 := v_1.Args[0]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ x := z1.Args[0]
+ z2 := v_1.Args[1]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ z2 := v_1.Args[0]
+ z1 := v_1.Args[1]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ x := z1.Args[0]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ z1 := v_1.Args[0]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ x := z1.Args[0]
+ z2 := v_1.Args[1]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (SETNEmem [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem)
+ // cond: z1==z2 && !config.nacl
+ // result: (SETBmem [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ z2 := v_1.Args[0]
+ z1 := v_1.Args[1]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ x := z1.Args[0]
+ mem := v.Args[2]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64SETBmem)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETNEmem_20(v *Value) bool {
+ b := v.Block
+ _ = b
// match: (SETNEmem [off] {sym} ptr (InvertFlags x) mem)
// cond:
// result: (SETNEmem [off] {sym} ptr x mem)
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64SETNEmem_10(v *Value) bool {
- b := v.Block
- _ = b
// match: (SETNEmem [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (SETNEmem [off1+off2] {mergeSym(sym1,sym2)} base val mem)
return false
}
func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
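+ // (x >> 1) << 1 simply clears bit 0 of x, so it can be done with a single
+ // BTR of bit 0.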
+ // match: (SHLLconst [1] (SHRLconst [1] x))
+ // cond: !config.nacl
+ // result: (BTRLconst [0] x)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ if v_0.AuxInt != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
// match: (SHLLconst x [0])
// cond:
// result: x
return false
}
func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
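+ // 64-bit form of the same identity: (x >> 1) << 1 clears bit 0.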
+ // match: (SHLQconst [1] (SHRQconst [1] x))
+ // cond: !config.nacl
+ // result: (BTRQconst [0] x)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if v_0.AuxInt != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = 0
+ v.AddArg(x)
+ return true
+ }
// match: (SHLQconst x [0])
// cond:
// result: x
return false
}
func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
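+ // (x << 1) >> 1 with 32-bit logical shifts clears the top bit, so it
+ // reduces to a single BTR of bit 31.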
+ // match: (SHRLconst [1] (SHLLconst [1] x))
+ // cond: !config.nacl
+ // result: (BTRLconst [31] x)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if v_0.AuxInt != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = 31
+ v.AddArg(x)
+ return true
+ }
// match: (SHRLconst x [0])
// cond:
// result: x
return false
}
func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
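+ // 64-bit form: (x << 1) >> 1 clears bit 63.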
+ // match: (SHRQconst [1] (SHLQconst [1] x))
+ // cond: !config.nacl
+ // result: (BTRQconst [63] x)
+ for {
+ if v.AuxInt != 1 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if v_0.AuxInt != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = 63
+ v.AddArg(x)
+ return true
+ }
// match: (SHRQconst x [0])
// cond:
// result: x
return false
}
func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
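+ // x ^ (1 << y) complements bit y of x, which is exactly what BTCL does.
+ // Both operand orders are matched since XORL is commutative.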
+ // match: (XORL (SHLL (MOVLconst [1]) y) x)
+ // cond: !config.nacl
+ // result: (BTCL x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ if v_0_0.AuxInt != 1 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTCL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XORL x (SHLL (MOVLconst [1]) y))
+ // cond: !config.nacl
+ // result: (BTCL x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ if v_1_0.AuxInt != 1 {
+ break
+ }
+ y := v_1.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTCL)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
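+ // XOR with a constant power of two toggles a single, known bit. The
+ // c >= 128 bound likely restricts this to constants that no longer fit in a
+ // sign-extended 8-bit immediate, so BTCLconst is not larger than XORLconst.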
+ // match: (XORL (MOVLconst [c]) x)
+ // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+ // result: (BTCLconst [log2uint32(c)] x)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTCLconst)
+ v.AuxInt = log2uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORL x (MOVLconst [c]))
+ // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+ // result: (BTCLconst [log2uint32(c)] x)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTCLconst)
+ v.AuxInt = log2uint32(c)
+ v.AddArg(x)
+ return true
+ }
// match: (XORL x (MOVLconst [c]))
// cond:
// result: (XORLconst [c] x)
v.AddArg(x)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool {
// match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
// cond: d==8-c && c < 8 && t.Size() == 1
// result: (ROLBconst x [c])
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool {
// match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
// result: (XORLmem x [off] {sym} ptr mem)
return false
}
func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
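+ // Same single-bit-toggle rewrite when the constant has already been folded
+ // into an XORLconst.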
+ // match: (XORLconst [c] x)
+ // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+ // result: (BTCLconst [log2uint32(c)] x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTCLconst)
+ v.AuxInt = log2uint32(c)
+ v.AddArg(x)
+ return true
+ }
// match: (XORLconst [1] (SETNE x))
// cond:
// result: (SETEQ x)
v.AddArg(x)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool {
// match: (XORLconst [1] (SETA x))
// cond:
// result: (SETBE x)
v.AddArg(x)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool {
// match: (XORLconst [c] (XORLconst [d] x))
// cond:
// result: (XORLconst [c ^ d] x)
return false
}
func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
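+ // 64-bit counterparts of the BTCL rules: x ^ (1 << y) complements bit y,
+ // and a power-of-two constant >= 128 toggles one known bit via BTCQconst.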
+ // match: (XORQ (SHLQ (MOVQconst [1]) y) x)
+ // cond: !config.nacl
+ // result: (BTCQ x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64SHLQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ if v_0_0.AuxInt != 1 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTCQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XORQ x (SHLQ (MOVQconst [1]) y))
+ // cond: !config.nacl
+ // result: (BTCQ x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64SHLQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ if v_1_0.AuxInt != 1 {
+ break
+ }
+ y := v_1.Args[1]
+ if !(!config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTCQ)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XORQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+ // result: (BTCQconst [log2(c)] x)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTCQconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQ x (MOVQconst [c]))
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+ // result: (BTCQconst [log2(c)] x)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTCQconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
// match: (XORQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (XORQconst [c] x)
v.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQ_10(v *Value) bool {
// match: (XORQ l:(MOVQload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l, x) && clobber(l)
// result: (XORQmem x [off] {sym} ptr mem)
return false
}
func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
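+ // As with XORLconst: a large power-of-two constant toggles one known bit.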
+ // match: (XORQconst [c] x)
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+ // result: (BTCQconst [log2(c)] x)
+ for {
+ c := v.AuxInt
+ x := v.Args[0]
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
+ break
+ }
+ v.reset(OpAMD64BTCQconst)
+ v.AuxInt = log2(c)
+ v.AddArg(x)
+ return true
+ }
// match: (XORQconst [c] (XORQconst [d] x))
// cond:
// result: (XORQconst [c ^ d] x)
b.Aux = nil
return true
}
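+ // When the block condition is a TEST of a single isolated bit against
+ // itself, BT can compute that bit directly: BT leaves the tested bit in CF,
+ // so EQ (bit clear) becomes UGE (CF == 0).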
+ // match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (UGE (BTQconst [63] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v.Args[1]
+ z1 := v.Args[0]
+ if z1.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64UGE
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (EQ (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (UGE (BTQconst [63] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v.Args[1]
+ z2 := v.Args[0]
+ z1 := v.Args[1]
+ if z1.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64UGE
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (UGE (BTQconst [31] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v.Args[1]
+ z1 := v.Args[0]
+ if z1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64UGE
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (EQ (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (UGE (BTQconst [31] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v.Args[1]
+ z2 := v.Args[0]
+ z1 := v.Args[1]
+ if z1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64UGE
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (UGE (BTQconst [0] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v.Args[1]
+ z1 := v.Args[0]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64UGE
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (EQ (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (UGE (BTQconst [0] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v.Args[1]
+ z2 := v.Args[0]
+ z1 := v.Args[1]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64UGE
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (UGE (BTLconst [0] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v.Args[1]
+ z1 := v.Args[0]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64UGE
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (EQ (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (UGE (BTLconst [0] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v.Args[1]
+ z2 := v.Args[0]
+ z1 := v.Args[1]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64UGE
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (EQ (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (UGE (BTQconst [63] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v.Args[1]
+ z1 := v.Args[0]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ x := z1.Args[0]
+ z2 := v.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64UGE
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (EQ (TESTQ z2 z1:(SHRQconst [63] x)))
+ // cond: z1==z2 && !config.nacl
+ // result: (UGE (BTQconst [63] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v.Args[1]
+ z2 := v.Args[0]
+ z1 := v.Args[1]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ x := z1.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64UGE
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (EQ (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (UGE (BTLconst [31] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v.Args[1]
+ z1 := v.Args[0]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ x := z1.Args[0]
+ z2 := v.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64UGE
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (EQ (TESTL z2 z1:(SHRLconst [31] x)))
+ // cond: z1==z2 && !config.nacl
+ // result: (UGE (BTLconst [31] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v.Args[1]
+ z2 := v.Args[0]
+ z1 := v.Args[1]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ x := z1.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64UGE
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
// match: (EQ (InvertFlags cmp) yes no)
// cond:
// result: (EQ cmp yes no)
b.Aux = nil
return true
}
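+ // NE counterparts of the EQ rules above: the bit is set exactly when CF is
+ // set after BT, so NE becomes ULT (CF == 1).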
+ // match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (ULT (BTQconst [63] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v.Args[1]
+ z1 := v.Args[0]
+ if z1.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64ULT
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (ULT (BTQconst [63] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v.Args[1]
+ z2 := v.Args[0]
+ z1 := v.Args[1]
+ if z1.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64ULT
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (ULT (BTQconst [31] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v.Args[1]
+ z1 := v.Args[0]
+ if z1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64ULT
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (ULT (BTQconst [31] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v.Args[1]
+ z2 := v.Args[0]
+ z1 := v.Args[1]
+ if z1.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64ULT
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (ULT (BTQconst [0] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v.Args[1]
+ z1 := v.Args[0]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64ULT
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (ULT (BTQconst [0] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v.Args[1]
+ z2 := v.Args[0]
+ z1 := v.Args[1]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ if z1_0.AuxInt != 63 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64ULT
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (ULT (BTLconst [0] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v.Args[1]
+ z1 := v.Args[0]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ z2 := v.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64ULT
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
+ // cond: z1==z2 && !config.nacl
+ // result: (ULT (BTLconst [0] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v.Args[1]
+ z2 := v.Args[0]
+ z1 := v.Args[1]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ if z1_0.AuxInt != 31 {
+ break
+ }
+ x := z1_0.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64ULT
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 0
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (ULT (BTQconst [63] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v.Args[1]
+ z1 := v.Args[0]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ x := z1.Args[0]
+ z2 := v.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64ULT
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTQ z2 z1:(SHRQconst [63] x)))
+ // cond: z1==z2 && !config.nacl
+ // result: (ULT (BTQconst [63] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v.Args[1]
+ z2 := v.Args[0]
+ z1 := v.Args[1]
+ if z1.Op != OpAMD64SHRQconst {
+ break
+ }
+ if z1.AuxInt != 63 {
+ break
+ }
+ x := z1.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64ULT
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = 63
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2 && !config.nacl
+ // result: (ULT (BTLconst [31] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v.Args[1]
+ z1 := v.Args[0]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ x := z1.Args[0]
+ z2 := v.Args[1]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64ULT
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
+ // match: (NE (TESTL z2 z1:(SHRLconst [31] x)))
+ // cond: z1==z2 && !config.nacl
+ // result: (ULT (BTLconst [31] x))
+ for {
+ v := b.Control
+ if v.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v.Args[1]
+ z2 := v.Args[0]
+ z1 := v.Args[1]
+ if z1.Op != OpAMD64SHRLconst {
+ break
+ }
+ if z1.AuxInt != 31 {
+ break
+ }
+ x := z1.Args[0]
+ if !(z1 == z2 && !config.nacl) {
+ break
+ }
+ b.Kind = BlockAMD64ULT
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = 31
+ v0.AddArg(x)
+ b.SetControl(v0)
+ b.Aux = nil
+ return true
+ }
// match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
// cond:
// result: (UGT cmp yes no)