From: Ilya Tocar Date: Tue, 3 Oct 2017 19:12:00 +0000 (-0500) Subject: cmd/compile/internal/amd64: add SETccmem X-Git-Tag: go1.10beta1~853 X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=6b8a3c88894cdd2f501a6497c1fc5cfd89861a7f;p=gostls13.git cmd/compile/internal/amd64: add SETccmem Combine setcc and store of result into setcc that writes directly to memory. Triggers 200+ times in go tool. Fixes #21630 Change-Id: Iafa22607426f4120140c88fae4b9aecb46e0bba8 Reviewed-on: https://go-review.googlesource.com/67950 Run-TryBot: Ilya Tocar TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 22e69aa514..7980e6cc15 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -841,6 +841,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() + case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE, ssa.OpAMD64SETL, ssa.OpAMD64SETLE, ssa.OpAMD64SETG, ssa.OpAMD64SETGE, @@ -852,6 +853,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() + case ssa.OpAMD64SETEQmem, ssa.OpAMD64SETNEmem, + ssa.OpAMD64SETLmem, ssa.OpAMD64SETLEmem, + ssa.OpAMD64SETGmem, ssa.OpAMD64SETGEmem, + ssa.OpAMD64SETBmem, ssa.OpAMD64SETBEmem, + ssa.OpAMD64SETAmem, ssa.OpAMD64SETAEmem: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + gc.AddAux(&p.To, v) + case ssa.OpAMD64SETNEF: p := s.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/gc/asm_test.go b/src/cmd/compile/internal/gc/asm_test.go index 97d5b7f096..cccc9331e5 100644 --- a/src/cmd/compile/internal/gc/asm_test.go +++ b/src/cmd/compile/internal/gc/asm_test.go @@ -1120,6 +1120,14 @@ var linuxAMD64Tests = []*asmTest{ `, pos: []string{"\tMOVL\t[^X].*, X.*"}, }, + { + fn: ` + func $(x uint32) bool { + return x > 4 + } + `, + pos: 
[]string{"\tSETHI\t\\("}, + }, } var linux386Tests = []*asmTest{ diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index bcc8378a4e..7e5aab7bc2 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -486,6 +486,17 @@ (Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base) (Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base) +(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 -> (SETLmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 -> (SETLEmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 -> (SETGmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 -> (SETGEmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 -> (SETEQmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 -> (SETNEmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 -> (SETBmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 -> (SETBEmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 -> (SETAmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 -> (SETAEmem [off] {sym} ptr x mem) + // block rewrites (If (SETL cmp) yes no) -> (LT cmp yes no) (If (SETLE cmp) yes no) -> (LE cmp yes no) @@ -580,6 +591,17 @@ (SETEQ (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAE (BTQconst [log2(c)] x)) (SETNE (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETB (BTQconst [log2(c)] x)) (SETEQ (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAE (BTQconst [log2(c)] x)) +// SET..mem variant +(SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl -> (SETBmem 
[off] {sym} ptr (BTL x y) mem) +(SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl -> (SETAEmem [off] {sym} ptr (BTL x y) mem) +(SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl -> (SETBmem [off] {sym} ptr (BTQ x y) mem) +(SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl -> (SETAEmem [off] {sym} ptr (BTQ x y) mem) +(SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETBmem [off] {sym} ptr (BTLconst [log2(c)] x) mem) +(SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETAEmem [off] {sym} ptr (BTLconst [log2(c)] x) mem) +(SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) +(SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) +(SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) +(SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) // Fold boolean negation into SETcc. 
(XORLconst [1] (SETNE x)) -> (SETEQ x) @@ -969,6 +991,17 @@ (SETEQ (InvertFlags x)) -> (SETEQ x) (SETNE (InvertFlags x)) -> (SETNE x) +(SETLmem [off] {sym} ptr (InvertFlags x) mem) -> (SETGmem [off] {sym} ptr x mem) +(SETGmem [off] {sym} ptr (InvertFlags x) mem) -> (SETLmem [off] {sym} ptr x mem) +(SETBmem [off] {sym} ptr (InvertFlags x) mem) -> (SETAmem [off] {sym} ptr x mem) +(SETAmem [off] {sym} ptr (InvertFlags x) mem) -> (SETBmem [off] {sym} ptr x mem) +(SETLEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETGEmem [off] {sym} ptr x mem) +(SETGEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETLEmem [off] {sym} ptr x mem) +(SETBEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETAEmem [off] {sym} ptr x mem) +(SETAEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETBEmem [off] {sym} ptr x mem) +(SETEQmem [off] {sym} ptr (InvertFlags x) mem) -> (SETEQmem [off] {sym} ptr x mem) +(SETNEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETNEmem [off] {sym} ptr x mem) + // sign extended loads // Note: The combined instruction must end up in the same block // as the original load. 
If not, we end up making a value with diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index 51dc3d33ee..c251f7e657 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -355,6 +355,17 @@ func init() { {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0 {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0 {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0 + // Variants that store result to memory + {name: "SETEQmem", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETNEmem", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETLmem", argLength: 3, reg: gpstoreconst, asm: "SETLT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed < condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETLEmem", argLength: 3, reg: gpstoreconst, asm: "SETLE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed <= condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETGmem", argLength: 3, reg: gpstoreconst, asm: "SETGT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed > condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETGEmem", argLength: 3, reg: gpstoreconst, asm: "SETGE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed >= condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETBmem", argLength: 3, reg: gpstoreconst, asm: "SETCS", aux: "SymOff", typ: "Mem", 
faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned < condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETBEmem", argLength: 3, reg: gpstoreconst, asm: "SETLS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned <= condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETAmem", argLength: 3, reg: gpstoreconst, asm: "SETHI", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned > condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETAEmem", argLength: 3, reg: gpstoreconst, asm: "SETCC", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned >= condition from arg1 to arg0+auxint+aux, arg2=mem // Need different opcodes for floating point conditions because // any comparison involving a NaN is always FALSE and thus // the patterns for inverting conditions cannot be used. diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 39b9be3e11..a8c7a52c7d 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -568,6 +568,16 @@ const ( OpAMD64SETBE OpAMD64SETA OpAMD64SETAE + OpAMD64SETEQmem + OpAMD64SETNEmem + OpAMD64SETLmem + OpAMD64SETLEmem + OpAMD64SETGmem + OpAMD64SETGEmem + OpAMD64SETBmem + OpAMD64SETBEmem + OpAMD64SETAmem + OpAMD64SETAEmem OpAMD64SETEQF OpAMD64SETNEF OpAMD64SETORD @@ -6761,6 +6771,136 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SETEQmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETNEmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + 
}, + { + name: "SETLmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETLT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETLEmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETLE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETGmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETGEmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETGE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETBmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETCS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETBEmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETLS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETAmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETHI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETAEmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX 
BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, { name: "SETEQF", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index c2f71a41cd..d130081c87 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -100,7 +100,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64MOVBloadidx1: return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v) case OpAMD64MOVBstore: - return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) + return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v) case OpAMD64MOVBstoreconst: return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v) case OpAMD64MOVBstoreconstidx1: @@ -301,22 +301,42 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64SETA_0(v) case OpAMD64SETAE: return rewriteValueAMD64_OpAMD64SETAE_0(v) + case OpAMD64SETAEmem: + return rewriteValueAMD64_OpAMD64SETAEmem_0(v) + case OpAMD64SETAmem: + return rewriteValueAMD64_OpAMD64SETAmem_0(v) case OpAMD64SETB: return rewriteValueAMD64_OpAMD64SETB_0(v) case OpAMD64SETBE: return rewriteValueAMD64_OpAMD64SETBE_0(v) + case OpAMD64SETBEmem: + return rewriteValueAMD64_OpAMD64SETBEmem_0(v) + case OpAMD64SETBmem: + return rewriteValueAMD64_OpAMD64SETBmem_0(v) case OpAMD64SETEQ: return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v) + case OpAMD64SETEQmem: + return rewriteValueAMD64_OpAMD64SETEQmem_0(v) case OpAMD64SETG: return rewriteValueAMD64_OpAMD64SETG_0(v) case OpAMD64SETGE: return rewriteValueAMD64_OpAMD64SETGE_0(v) + case OpAMD64SETGEmem: + return rewriteValueAMD64_OpAMD64SETGEmem_0(v) + case OpAMD64SETGmem: + return rewriteValueAMD64_OpAMD64SETGmem_0(v) case OpAMD64SETL: return rewriteValueAMD64_OpAMD64SETL_0(v) case OpAMD64SETLE: return rewriteValueAMD64_OpAMD64SETLE_0(v) + case OpAMD64SETLEmem: + 
return rewriteValueAMD64_OpAMD64SETLEmem_0(v) + case OpAMD64SETLmem: + return rewriteValueAMD64_OpAMD64SETLmem_0(v) case OpAMD64SETNE: return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v) + case OpAMD64SETNEmem: + return rewriteValueAMD64_OpAMD64SETNEmem_0(v) case OpAMD64SHLL: return rewriteValueAMD64_OpAMD64SHLL_0(v) case OpAMD64SHLLconst: @@ -5190,6 +5210,259 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { + // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem) + // cond: y.Uses == 1 + // result: (SETLmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETL { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETLmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem) + // cond: y.Uses == 1 + // result: (SETLEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETLE { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETLEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem) + // cond: y.Uses == 1 + // result: (SETGmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETG { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETGmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem) + // cond: y.Uses == 1 + // result: (SETGEmem [off] {sym} ptr x mem) 
+ for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETGE { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETGEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) + // cond: y.Uses == 1 + // result: (SETEQmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETEQ { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETEQmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem) + // cond: y.Uses == 1 + // result: (SETNEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETNE { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETNEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem) + // cond: y.Uses == 1 + // result: (SETBmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETB { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETBmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem) + // cond: y.Uses == 1 + // result: (SETBEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETBE { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } 
+ v.reset(OpAMD64SETBEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem) + // cond: y.Uses == 1 + // result: (SETAmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETA { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETAmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem) + // cond: y.Uses == 1 + // result: (SETAEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETAE { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { b := v.Block _ = b // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) @@ -5707,7 +5980,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { +func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types @@ -35562,6 +35835,56 @@ func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SETAEmem_0(v *Value) bool { + // match: (SETAEmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETBEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETBEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + 
return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETAmem_0(v *Value) bool { + // match: (SETAmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETBmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETBmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { // match: (SETB (InvertFlags x)) // cond: @@ -35714,6 +36037,56 @@ func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SETBEmem_0(v *Value) bool { + // match: (SETBEmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETAEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETBmem_0(v *Value) bool { + // match: (SETBmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETAmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETAmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { b := v.Block _ = b @@ -36019,6 +36392,329 @@ func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SETEQmem_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = 
config + // match: (SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) + // cond: !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTL x y) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64SHLL { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVLconst { + break + } + if v_1_0_0.AuxInt != 1 { + break + } + x := v_1_0.Args[1] + y := v_1.Args[1] + mem := v.Args[2] + if !(!config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem) + // cond: !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTL x y) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64SHLL { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64MOVLconst { + break + } + if v_1_1_0.AuxInt != 1 { + break + } + x := v_1_1.Args[1] + mem := v.Args[2] + if !(!config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) + // cond: !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTQ x y) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != 
OpAMD64SHLQ { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVQconst { + break + } + if v_1_0_0.AuxInt != 1 { + break + } + x := v_1_0.Args[1] + y := v_1.Args[1] + mem := v.Args[2] + if !(!config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem) + // cond: !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTQ x y) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64SHLQ { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64MOVQconst { + break + } + if v_1_1_0.AuxInt != 1 { + break + } + x := v_1_1.Args[1] + mem := v.Args[2] + if !(!config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem) + // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTLconst [log2(c)] x) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTLconst { + break + } + c := v_1.AuxInt + x := v_1.Args[0] + mem := v.Args[2] + if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr 
(TESTQconst [c] x) mem) + // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQconst { + break + } + c := v_1.AuxInt + x := v_1.Args[0] + mem := v.Args[2] + if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) + // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64MOVQconst { + break + } + c := v_1_0.AuxInt + x := v_1.Args[1] + mem := v.Args[2] + if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem) + // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64MOVQconst { + break + } + c := v_1_1.AuxInt + mem := v.Args[2] + if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = 
off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETEQmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETEQmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { // match: (SETG (InvertFlags x)) // cond: @@ -36171,6 +36867,56 @@ func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SETGEmem_0(v *Value) bool { + // match: (SETGEmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETLEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETLEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETGmem_0(v *Value) bool { + // match: (SETGmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETLmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETLmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { // match: (SETL (InvertFlags x)) // cond: @@ -36323,6 +37069,56 @@ func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { } return false } +func 
rewriteValueAMD64_OpAMD64SETLEmem_0(v *Value) bool {
	// rewriteValueAMD64_OpAMD64SETLEmem_0 rewrites the store-to-memory form
	// of SETLE. Single rule: under InvertFlags the op is replaced by
	// SETGEmem (the reset below), with off/sym/ptr/mem carried over.
	// NOTE(review): this looks like rulegen output (gen/AMD64.rules) — fix
	// the rules file rather than this code; confirm against the file header.
	// match: (SETLEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETGEmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETGEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETLmem_0 rewrites the store-to-memory form of
// SETL. Single rule: under InvertFlags the op is replaced by SETGmem,
// mirroring the SETLEmem/SETGEmem pair above.
func rewriteValueAMD64_OpAMD64SETLmem_0(v *Value) bool {
	// match: (SETLmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETGmem [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETGmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
	b := v.Block
	_ = b
@@ -36628,6 +37424,329 @@ func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool {
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETNEmem_0 rewrites the store-to-memory form of
// SETNE. The first eight rules replace a TEST against a single-bit
// (power-of-two) mask with a BT-family op and re-emit the store as
// SETBmem (the bit lands in the flag SETB reads); each TEST form appears
// with both operand orders since TEST is commutative. The last rule strips
// InvertFlags. All mask rules are guarded by !config.nacl.
func rewriteValueAMD64_OpAMD64SETNEmem_0(v *Value) bool {
	b := v.Block
	_ = b
	config := b.Func.Config
	_ = config
	// match: (SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTL x y) mem)
	// Commuted form of the previous rule: the shifted 1 is TESTL's second arg.
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
	// 64-bit analogue of the first rule.
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		x := v_1_0.Args[1]
		y := v_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQ x y) mem)
	// Commuted form of the previous rule.
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		_ = v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		x := v_1_1.Args[1]
		mem := v.Args[2]
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTLconst [log2(c)] x) mem)
	// Constant single-bit mask: test bit log2(c) directly.
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	// Materialized 64-bit constant mask; same outcome as TESTQconst above.
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_0.AuxInt
		x := v_1.Args[1]
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
	// cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl
	// result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	// Commuted form of the previous rule.
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		x := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_1.AuxInt
		mem := v.Args[2]
		if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETBmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETNEmem [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETNEmem [off] {sym} ptr x mem)
	// InvertFlags is stripped; the op re-emits itself with the inner flags.
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v.Args[2]
		v.reset(OpAMD64SETNEmem)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool {
	b := v.Block
	_ = b