[dev.ssa] cmd/compile: refactor out rulegen value parsing
author Josh Bleecher Snyder <josharian@gmail.com>
Fri, 1 Jul 2016 18:05:29 +0000 (11:05 -0700)
committer Josh Bleecher Snyder <josharian@gmail.com>
Wed, 3 Aug 2016 22:51:51 +0000 (22:51 +0000)
Previously, genMatch0 and genResult0 contained
lots of duplication: locating the op, parsing
the value, validating, etc.
Parsing and validation were mixed in with code gen.

Extract a helper, parseValue. It is responsible
for parsing the value, locating the op, and doing
shared validation.
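
For illustration (a hypothetical rule fragment; MOVLstore
is a real 386 op, see rewrite386.go below), with arch 386
the value

    (MOVLstore [off] {sym} ptr val mem)

parses to the MOVLstore op record, oparch "386", typ "",
auxint "off", aux "sym", and args ["ptr", "val", "mem"].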

As a bonus (and possibly as my original motivation),
make op selection pay attention to the number
of args present.
This allows arch-specific ops to share a name
with generic ops as long as there is no ambiguity.
It also detects and reports unresolved ambiguity,
unlike before, where it would simply always
pick the generic op, with no warning.
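
A minimal sketch of the new resolution, using hypothetical
op tables (the names are made up; opData is rulegen-internal):

    genericOps: {name: "Foo", argLength: 1}
    arch.ops:   {name: "Foo", argLength: 2}

    (Foo x)   -> resolves to the generic op, OpFoo
    (Foo x y) -> resolves to the arch op, e.g. OpARMFoo
    both declared with argLength 2 -> rulegen fails with
    "matches for op Foo found in both generic and ARM"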

Also use parseValue when generating the top-level
op dispatch, to ensure its opinion about ops
matches genMatch0 and genResult0.
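
Concretely, the dispatch key is now built as
fmt.Sprintf("Op%s%s", oparch, op.name): the 386 op CMPB
yields "Op386CMPB", while a generic op such as Add16
(oparch "") yields "OpAdd16".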

The order of statements in the generated code used
to depend on the exact rule. It is now somewhat
independent of the rule. That is the source
of some of the generated code changes in this CL.
See rewritedec64 and rewritegeneric for examples.
It is a one-time change.
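
For example, in the (CMPBconst (ANDL x y) [0]) matcher in
rewrite386.go below, the generated code used to load the
ANDL args before checking v.AuxInt; it now emits

    if v.AuxInt != 0 {
            break
    }
    v_0 := v.Args[0]

first, because genMatch0 now consumes parseValue's results
in a fixed order: typ, then auxint, then aux, then args.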

The op dispatch switch and functions used to be
sorted by opname without architecture. The sort
now includes the architecture, so arch-prefixed
names such as Op386CMPB sort ahead of generic
names such as OpAdd16, leading to further
generated code changes.
See rewriteARM and rewriteAMD64 for examples.
Again, it is a one-time change.

There are no functional changes.

Change-Id: I22c989183ad5651741ebdc0566349c5fd6c6b23c
Reviewed-on: https://go-review.googlesource.com/24649
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
src/cmd/compile/internal/ssa/gen/rulegen.go
src/cmd/compile/internal/ssa/rewrite386.go
src/cmd/compile/internal/ssa/rewriteAMD64.go
src/cmd/compile/internal/ssa/rewriteARM.go
src/cmd/compile/internal/ssa/rewriteARM64.go
src/cmd/compile/internal/ssa/rewritePPC64.go
src/cmd/compile/internal/ssa/rewritedec64.go
src/cmd/compile/internal/ssa/rewritegeneric.go

index 0947e65ca711318076951c2970a102b5ed0e6330..0cb428b6faef54f487e8cb79b957b36cc3d9c1e1 100644 (file)
@@ -117,15 +117,17 @@ func genRules(arch arch) {
                if unbalanced(rule) {
                        continue
                }
-               op := strings.Split(rule, " ")[0][1:]
-               if op[len(op)-1] == ')' {
-                       op = op[:len(op)-1] // rule has only opcode, e.g. (ConstNil) -> ...
-               }
+
                loc := fmt.Sprintf("%s.rules:%d", arch.name, ruleLineno)
-               if isBlock(op, arch) {
-                       blockrules[op] = append(blockrules[op], Rule{rule: rule, loc: loc})
+               r := Rule{rule: rule, loc: loc}
+               if rawop := strings.Split(rule, " ")[0][1:]; isBlock(rawop, arch) {
+                       blockrules[rawop] = append(blockrules[rawop], r)
                } else {
-                       oprules[op] = append(oprules[op], Rule{rule: rule, loc: loc})
+                       // Do fancier value op matching.
+                       match, _, _ := r.parse()
+                       op, oparch, _, _, _, _ := parseValue(match, arch, loc)
+                       opname := fmt.Sprintf("Op%s%s", oparch, op.name)
+                       oprules[opname] = append(oprules[opname], r)
                }
                rule = ""
                ruleLineno = 0
@@ -157,8 +159,8 @@ func genRules(arch arch) {
        fmt.Fprintf(w, "func rewriteValue%s(v *Value, config *Config) bool {\n", arch.name)
        fmt.Fprintf(w, "switch v.Op {\n")
        for _, op := range ops {
-               fmt.Fprintf(w, "case %s:\n", opName(op, arch))
-               fmt.Fprintf(w, "return rewriteValue%s_%s(v, config)\n", arch.name, opName(op, arch))
+               fmt.Fprintf(w, "case %s:\n", op)
+               fmt.Fprintf(w, "return rewriteValue%s_%s(v, config)\n", arch.name, op)
        }
        fmt.Fprintf(w, "}\n")
        fmt.Fprintf(w, "return false\n")
@@ -167,7 +169,7 @@ func genRules(arch arch) {
        // Generate a routine per op. Note that we don't make one giant routine
        // because it is too big for some compilers.
        for _, op := range ops {
-               fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, opName(op, arch))
+               fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, op)
                fmt.Fprintln(w, "b := v.Block")
                fmt.Fprintln(w, "_ = b")
                var canFail bool
@@ -334,141 +336,108 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]struct{}, t
        }
        canFail := false
 
-       // split body up into regions. Split by spaces/tabs, except those
-       // contained in () or {}.
-       s := split(match[1 : len(match)-1]) // remove parens, then split
-
-       // Find op record
-       var op opData
-       for _, x := range genericOps {
-               if x.name == s[0] {
-                       op = x
-                       break
-               }
-       }
-       for _, x := range arch.ops {
-               if x.name == s[0] {
-                       op = x
-                       break
-               }
-       }
-       if op.name == "" {
-               log.Fatalf("%s: unknown op %s", loc, s[0])
-       }
+       op, oparch, typ, auxint, aux, args := parseValue(match, arch, loc)
 
        // check op
        if !top {
-               fmt.Fprintf(w, "if %s.Op != %s {\nbreak\n}\n", v, opName(s[0], arch))
+               fmt.Fprintf(w, "if %s.Op != Op%s%s {\nbreak\n}\n", v, oparch, op.name)
                canFail = true
        }
 
-       // check type/aux/args
-       argnum := 0
-       for _, a := range s[1:] {
-               if a[0] == '<' {
-                       // type restriction
-                       t := a[1 : len(a)-1] // remove <>
-                       if !isVariable(t) {
-                               // code. We must match the results of this code.
-                               fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t)
+       if typ != "" {
+               if !isVariable(typ) {
+                       // code. We must match the results of this code.
+                       fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, typ)
+                       canFail = true
+               } else {
+                       // variable
+                       if _, ok := m[typ]; ok {
+                               // must match previous variable
+                               fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, typ)
                                canFail = true
                        } else {
-                               // variable
-                               if _, ok := m[t]; ok {
-                                       // must match previous variable
-                                       fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t)
-                                       canFail = true
-                               } else {
-                                       m[t] = struct{}{}
-                                       fmt.Fprintf(w, "%s := %s.Type\n", t, v)
-                               }
+                               m[typ] = struct{}{}
+                               fmt.Fprintf(w, "%s := %s.Type\n", typ, v)
                        }
-               } else if a[0] == '[' {
-                       // auxint restriction
-                       switch op.aux {
-                       case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
-                       default:
-                               log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
-                       }
-                       x := a[1 : len(a)-1] // remove []
-                       if !isVariable(x) {
-                               // code
-                               fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, x)
+               }
+       }
+
+       if auxint != "" {
+               if !isVariable(auxint) {
+                       // code
+                       fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, auxint)
+                       canFail = true
+               } else {
+                       // variable
+                       if _, ok := m[auxint]; ok {
+                               fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, auxint)
                                canFail = true
                        } else {
-                               // variable
-                               if _, ok := m[x]; ok {
-                                       fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, x)
-                                       canFail = true
-                               } else {
-                                       m[x] = struct{}{}
-                                       fmt.Fprintf(w, "%s := %s.AuxInt\n", x, v)
-                               }
-                       }
-               } else if a[0] == '{' {
-                       // aux restriction
-                       switch op.aux {
-                       case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
-                       default:
-                               log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
+                               m[auxint] = struct{}{}
+                               fmt.Fprintf(w, "%s := %s.AuxInt\n", auxint, v)
                        }
-                       x := a[1 : len(a)-1] // remove {}
-                       if !isVariable(x) {
-                               // code
-                               fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, x)
+               }
+       }
+
+       if aux != "" {
+
+               if !isVariable(aux) {
+                       // code
+                       fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, aux)
+                       canFail = true
+               } else {
+                       // variable
+                       if _, ok := m[aux]; ok {
+                               fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, aux)
                                canFail = true
                        } else {
-                               // variable
-                               if _, ok := m[x]; ok {
-                                       fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, x)
-                                       canFail = true
-                               } else {
-                                       m[x] = struct{}{}
-                                       fmt.Fprintf(w, "%s := %s.Aux\n", x, v)
-                               }
+                               m[aux] = struct{}{}
+                               fmt.Fprintf(w, "%s := %s.Aux\n", aux, v)
                        }
-               } else if a == "_" {
-                       argnum++
-               } else if !strings.Contains(a, "(") {
+               }
+       }
+
+       for i, arg := range args {
+               if arg == "_" {
+                       continue
+               }
+               if !strings.Contains(arg, "(") {
                        // leaf variable
-                       if _, ok := m[a]; ok {
+                       if _, ok := m[arg]; ok {
                                // variable already has a definition. Check whether
                                // the old definition and the new definition match.
                                // For example, (add x x).  Equality is just pointer equality
                                // on Values (so cse is important to do before lowering).
-                               fmt.Fprintf(w, "if %s != %s.Args[%d] {\nbreak\n}\n", a, v, argnum)
+                               fmt.Fprintf(w, "if %s != %s.Args[%d] {\nbreak\n}\n", arg, v, i)
                                canFail = true
                        } else {
                                // remember that this variable references the given value
-                               m[a] = struct{}{}
-                               fmt.Fprintf(w, "%s := %s.Args[%d]\n", a, v, argnum)
+                               m[arg] = struct{}{}
+                               fmt.Fprintf(w, "%s := %s.Args[%d]\n", arg, v, i)
                        }
-                       argnum++
+                       continue
+               }
+               // compound sexpr
+               var argname string
+               colon := strings.Index(arg, ":")
+               openparen := strings.Index(arg, "(")
+               if colon >= 0 && openparen >= 0 && colon < openparen {
+                       // rule-specified name
+                       argname = arg[:colon]
+                       arg = arg[colon+1:]
                } else {
-                       // compound sexpr
-                       var argname string
-                       colon := strings.Index(a, ":")
-                       openparen := strings.Index(a, "(")
-                       if colon >= 0 && openparen >= 0 && colon < openparen {
-                               // rule-specified name
-                               argname = a[:colon]
-                               a = a[colon+1:]
-                       } else {
-                               // autogenerated name
-                               argname = fmt.Sprintf("%s_%d", v, argnum)
-                       }
-                       fmt.Fprintf(w, "%s := %s.Args[%d]\n", argname, v, argnum)
-                       if genMatch0(w, arch, a, argname, m, false, loc) {
-                               canFail = true
-                       }
-                       argnum++
+                       // autogenerated name
+                       argname = fmt.Sprintf("%s_%d", v, i)
+               }
+               fmt.Fprintf(w, "%s := %s.Args[%d]\n", argname, v, i)
+               if genMatch0(w, arch, arg, argname, m, false, loc) {
+                       canFail = true
                }
        }
+
        if op.argLength == -1 {
-               fmt.Fprintf(w, "if len(%s.Args) != %d {\nbreak\n}\n", v, argnum)
+               fmt.Fprintf(w, "if len(%s.Args) != %d {\nbreak\n}\n", v, len(args))
                canFail = true
-       } else if int(op.argLength) != argnum {
-               log.Fatalf("%s: op %s should have %d args, has %d", loc, op.name, op.argLength, argnum)
        }
        return canFail
 }
@@ -500,105 +469,44 @@ func genResult0(w io.Writer, arch arch, result string, alloc *int, top, move boo
                return result
        }
 
-       s := split(result[1 : len(result)-1]) // remove parens, then split
-
-       // Find op record
-       var op opData
-       for _, x := range genericOps {
-               if x.name == s[0] {
-                       op = x
-                       break
-               }
-       }
-       for _, x := range arch.ops {
-               if x.name == s[0] {
-                       op = x
-                       break
-               }
-       }
-       if op.name == "" {
-               log.Fatalf("%s: unknown op %s", loc, s[0])
-       }
+       op, oparch, typ, auxint, aux, args := parseValue(result, arch, loc)
 
        // Find the type of the variable.
-       var opType string
-       var typeOverride bool
-       for _, a := range s[1:] {
-               if a[0] == '<' {
-                       // type restriction
-                       opType = a[1 : len(a)-1] // remove <>
-                       typeOverride = true
-                       break
-               }
-       }
-       if opType == "" {
-               // find default type, if any
-               for _, op := range arch.ops {
-                       if op.name == s[0] && op.typ != "" {
-                               opType = typeName(op.typ)
-                               break
-                       }
-               }
-       }
-       if opType == "" {
-               for _, op := range genericOps {
-                       if op.name == s[0] && op.typ != "" {
-                               opType = typeName(op.typ)
-                               break
-                       }
-               }
+       typeOverride := typ != ""
+       if typ == "" && op.typ != "" {
+               typ = typeName(op.typ)
        }
+
        var v string
        if top && !move {
                v = "v"
-               fmt.Fprintf(w, "v.reset(%s)\n", opName(s[0], arch))
+               fmt.Fprintf(w, "v.reset(Op%s%s)\n", oparch, op.name)
                if typeOverride {
-                       fmt.Fprintf(w, "v.Type = %s\n", opType)
+                       fmt.Fprintf(w, "v.Type = %s\n", typ)
                }
        } else {
-               if opType == "" {
-                       log.Fatalf("sub-expression %s (op=%s) must have a type", result, s[0])
+               if typ == "" {
+                       log.Fatalf("sub-expression %s (op=Op%s%s) must have a type", result, oparch, op.name)
                }
                v = fmt.Sprintf("v%d", *alloc)
                *alloc++
-               fmt.Fprintf(w, "%s := b.NewValue0(v.Line, %s, %s)\n", v, opName(s[0], arch), opType)
+               fmt.Fprintf(w, "%s := b.NewValue0(v.Line, Op%s%s, %s)\n", v, oparch, op.name, typ)
                if move && top {
                        // Rewrite original into a copy
                        fmt.Fprintf(w, "v.reset(OpCopy)\n")
                        fmt.Fprintf(w, "v.AddArg(%s)\n", v)
                }
        }
-       argnum := 0
-       for _, a := range s[1:] {
-               if a[0] == '<' {
-                       // type restriction, handled above
-               } else if a[0] == '[' {
-                       // auxint restriction
-                       switch op.aux {
-                       case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
-                       default:
-                               log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
-                       }
-                       x := a[1 : len(a)-1] // remove []
-                       fmt.Fprintf(w, "%s.AuxInt = %s\n", v, x)
-               } else if a[0] == '{' {
-                       // aux restriction
-                       switch op.aux {
-                       case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
-                       default:
-                               log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
-                       }
-                       x := a[1 : len(a)-1] // remove {}
-                       fmt.Fprintf(w, "%s.Aux = %s\n", v, x)
-               } else {
-                       // regular argument (sexpr or variable)
-                       x := genResult0(w, arch, a, alloc, false, move, loc)
-                       fmt.Fprintf(w, "%s.AddArg(%s)\n", v, x)
-                       argnum++
-               }
+
+       if auxint != "" {
+               fmt.Fprintf(w, "%s.AuxInt = %s\n", v, auxint)
+       }
+       if aux != "" {
+               fmt.Fprintf(w, "%s.Aux = %s\n", v, aux)
        }
-       if op.argLength != -1 && int(op.argLength) != argnum {
-               log.Fatalf("%s: op %s should have %d args, has %d", loc, op.name, op.argLength, argnum)
+       for _, arg := range args {
+               x := genResult0(w, arch, arg, alloc, false, move, loc)
+               fmt.Fprintf(w, "%s.AddArg(%s)\n", v, x)
        }
 
        return v
@@ -666,16 +574,102 @@ func isBlock(name string, arch arch) bool {
        return false
 }
 
-// opName converts from an op name specified in a rule file to an Op enum.
-// if the name matches a generic op, returns "Op" plus the specified name.
-// Otherwise, returns "Op" plus arch name plus op name.
-func opName(name string, arch arch) string {
-       for _, op := range genericOps {
-               if op.name == name {
-                       return "Op" + name
+// parseValue parses a parenthesized value from a rule.
+// The value can be from the match or the result side.
+// It returns the op and unparsed strings for typ, auxint, and aux restrictions and for all args.
+// oparch is the architecture that op is located in, or "" for generic.
+func parseValue(val string, arch arch, loc string) (op opData, oparch string, typ string, auxint string, aux string, args []string) {
+       val = val[1 : len(val)-1] // remove ()
+
+       // Split val up into regions.
+       // Split by spaces/tabs, except those contained in (), {}, [], or <>.
+       s := split(val)
+
+       // Extract restrictions and args.
+       for _, a := range s[1:] {
+               switch a[0] {
+               case '<':
+                       typ = a[1 : len(a)-1] // remove <>
+               case '[':
+                       auxint = a[1 : len(a)-1] // remove []
+               case '{':
+                       aux = a[1 : len(a)-1] // remove {}
+               default:
+                       args = append(args, a)
+               }
+       }
+
+       // Resolve the op.
+
+       // match reports whether x is a good op to select.
+       // If strict is true, rule generation might succeed.
+       // If strict is false, rule generation has failed,
+       // but we're trying to generate a useful error.
+       // Doing strict=true then strict=false allows
+       // precise op matching while retaining good error messages.
+       match := func(x opData, strict bool, archname string) bool {
+               if x.name != s[0] {
+                       return false
+               }
+               if x.argLength != -1 && int(x.argLength) != len(args) {
+                       if strict {
+                               return false
+                       } else {
+                               log.Printf("%s: op %s (%s) should have %d args, has %d", loc, s[0], archname, x.argLength, len(args))
+                       }
+               }
+               return true
+       }
+
+       for _, x := range genericOps {
+               if match(x, true, "generic") {
+                       op = x
+                       break
                }
        }
-       return "Op" + arch.name + name
+       if arch.name != "generic" {
+               for _, x := range arch.ops {
+                       if match(x, true, arch.name) {
+                               if op.name != "" {
+                                       log.Fatalf("%s: matches for op %s found in both generic and %s", loc, op.name, arch.name)
+                               }
+                               op = x
+                               oparch = arch.name
+                               break
+                       }
+               }
+       }
+
+       if op.name == "" {
+               // Failed to find the op.
+               // Run through everything again with strict=false
+               // to generate useful diagnostic messages before failing.
+               for _, x := range genericOps {
+                       match(x, false, "generic")
+               }
+               for _, x := range arch.ops {
+                       match(x, false, arch.name)
+               }
+               log.Fatalf("%s: unknown op %s", loc, s)
+       }
+
+       // Sanity check aux, auxint.
+       if auxint != "" {
+               switch op.aux {
+               case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32":
+               default:
+                       log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
+               }
+       }
+       if aux != "" {
+               switch op.aux {
+               case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32":
+               default:
+                       log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
+               }
+       }
+
+       return
 }
 
 func blockName(name string, arch arch) string {
index a6ded59452979b2237b3134691475e3953cb9541..d54a9cbc08a08168a0ea86a01726ae08b04e7d4f 100644 (file)
@@ -20,6 +20,184 @@ func rewriteValue386(v *Value, config *Config) bool {
                return rewriteValue386_Op386ANDL(v, config)
        case Op386ANDLconst:
                return rewriteValue386_Op386ANDLconst(v, config)
+       case Op386CMPB:
+               return rewriteValue386_Op386CMPB(v, config)
+       case Op386CMPBconst:
+               return rewriteValue386_Op386CMPBconst(v, config)
+       case Op386CMPL:
+               return rewriteValue386_Op386CMPL(v, config)
+       case Op386CMPLconst:
+               return rewriteValue386_Op386CMPLconst(v, config)
+       case Op386CMPW:
+               return rewriteValue386_Op386CMPW(v, config)
+       case Op386CMPWconst:
+               return rewriteValue386_Op386CMPWconst(v, config)
+       case Op386LEAL:
+               return rewriteValue386_Op386LEAL(v, config)
+       case Op386LEAL1:
+               return rewriteValue386_Op386LEAL1(v, config)
+       case Op386LEAL2:
+               return rewriteValue386_Op386LEAL2(v, config)
+       case Op386LEAL4:
+               return rewriteValue386_Op386LEAL4(v, config)
+       case Op386LEAL8:
+               return rewriteValue386_Op386LEAL8(v, config)
+       case Op386MOVBLSX:
+               return rewriteValue386_Op386MOVBLSX(v, config)
+       case Op386MOVBLSXload:
+               return rewriteValue386_Op386MOVBLSXload(v, config)
+       case Op386MOVBLZX:
+               return rewriteValue386_Op386MOVBLZX(v, config)
+       case Op386MOVBload:
+               return rewriteValue386_Op386MOVBload(v, config)
+       case Op386MOVBloadidx1:
+               return rewriteValue386_Op386MOVBloadidx1(v, config)
+       case Op386MOVBstore:
+               return rewriteValue386_Op386MOVBstore(v, config)
+       case Op386MOVBstoreconst:
+               return rewriteValue386_Op386MOVBstoreconst(v, config)
+       case Op386MOVBstoreconstidx1:
+               return rewriteValue386_Op386MOVBstoreconstidx1(v, config)
+       case Op386MOVBstoreidx1:
+               return rewriteValue386_Op386MOVBstoreidx1(v, config)
+       case Op386MOVLload:
+               return rewriteValue386_Op386MOVLload(v, config)
+       case Op386MOVLloadidx1:
+               return rewriteValue386_Op386MOVLloadidx1(v, config)
+       case Op386MOVLloadidx4:
+               return rewriteValue386_Op386MOVLloadidx4(v, config)
+       case Op386MOVLstore:
+               return rewriteValue386_Op386MOVLstore(v, config)
+       case Op386MOVLstoreconst:
+               return rewriteValue386_Op386MOVLstoreconst(v, config)
+       case Op386MOVLstoreconstidx1:
+               return rewriteValue386_Op386MOVLstoreconstidx1(v, config)
+       case Op386MOVLstoreconstidx4:
+               return rewriteValue386_Op386MOVLstoreconstidx4(v, config)
+       case Op386MOVLstoreidx1:
+               return rewriteValue386_Op386MOVLstoreidx1(v, config)
+       case Op386MOVLstoreidx4:
+               return rewriteValue386_Op386MOVLstoreidx4(v, config)
+       case Op386MOVSDload:
+               return rewriteValue386_Op386MOVSDload(v, config)
+       case Op386MOVSDloadidx1:
+               return rewriteValue386_Op386MOVSDloadidx1(v, config)
+       case Op386MOVSDloadidx8:
+               return rewriteValue386_Op386MOVSDloadidx8(v, config)
+       case Op386MOVSDstore:
+               return rewriteValue386_Op386MOVSDstore(v, config)
+       case Op386MOVSDstoreidx1:
+               return rewriteValue386_Op386MOVSDstoreidx1(v, config)
+       case Op386MOVSDstoreidx8:
+               return rewriteValue386_Op386MOVSDstoreidx8(v, config)
+       case Op386MOVSSload:
+               return rewriteValue386_Op386MOVSSload(v, config)
+       case Op386MOVSSloadidx1:
+               return rewriteValue386_Op386MOVSSloadidx1(v, config)
+       case Op386MOVSSloadidx4:
+               return rewriteValue386_Op386MOVSSloadidx4(v, config)
+       case Op386MOVSSstore:
+               return rewriteValue386_Op386MOVSSstore(v, config)
+       case Op386MOVSSstoreidx1:
+               return rewriteValue386_Op386MOVSSstoreidx1(v, config)
+       case Op386MOVSSstoreidx4:
+               return rewriteValue386_Op386MOVSSstoreidx4(v, config)
+       case Op386MOVWLSX:
+               return rewriteValue386_Op386MOVWLSX(v, config)
+       case Op386MOVWLSXload:
+               return rewriteValue386_Op386MOVWLSXload(v, config)
+       case Op386MOVWLZX:
+               return rewriteValue386_Op386MOVWLZX(v, config)
+       case Op386MOVWload:
+               return rewriteValue386_Op386MOVWload(v, config)
+       case Op386MOVWloadidx1:
+               return rewriteValue386_Op386MOVWloadidx1(v, config)
+       case Op386MOVWloadidx2:
+               return rewriteValue386_Op386MOVWloadidx2(v, config)
+       case Op386MOVWstore:
+               return rewriteValue386_Op386MOVWstore(v, config)
+       case Op386MOVWstoreconst:
+               return rewriteValue386_Op386MOVWstoreconst(v, config)
+       case Op386MOVWstoreconstidx1:
+               return rewriteValue386_Op386MOVWstoreconstidx1(v, config)
+       case Op386MOVWstoreconstidx2:
+               return rewriteValue386_Op386MOVWstoreconstidx2(v, config)
+       case Op386MOVWstoreidx1:
+               return rewriteValue386_Op386MOVWstoreidx1(v, config)
+       case Op386MOVWstoreidx2:
+               return rewriteValue386_Op386MOVWstoreidx2(v, config)
+       case Op386MULL:
+               return rewriteValue386_Op386MULL(v, config)
+       case Op386MULLconst:
+               return rewriteValue386_Op386MULLconst(v, config)
+       case Op386NEGL:
+               return rewriteValue386_Op386NEGL(v, config)
+       case Op386NOTL:
+               return rewriteValue386_Op386NOTL(v, config)
+       case Op386ORL:
+               return rewriteValue386_Op386ORL(v, config)
+       case Op386ORLconst:
+               return rewriteValue386_Op386ORLconst(v, config)
+       case Op386ROLBconst:
+               return rewriteValue386_Op386ROLBconst(v, config)
+       case Op386ROLLconst:
+               return rewriteValue386_Op386ROLLconst(v, config)
+       case Op386ROLWconst:
+               return rewriteValue386_Op386ROLWconst(v, config)
+       case Op386SARB:
+               return rewriteValue386_Op386SARB(v, config)
+       case Op386SARBconst:
+               return rewriteValue386_Op386SARBconst(v, config)
+       case Op386SARL:
+               return rewriteValue386_Op386SARL(v, config)
+       case Op386SARLconst:
+               return rewriteValue386_Op386SARLconst(v, config)
+       case Op386SARW:
+               return rewriteValue386_Op386SARW(v, config)
+       case Op386SARWconst:
+               return rewriteValue386_Op386SARWconst(v, config)
+       case Op386SBBL:
+               return rewriteValue386_Op386SBBL(v, config)
+       case Op386SBBLcarrymask:
+               return rewriteValue386_Op386SBBLcarrymask(v, config)
+       case Op386SETA:
+               return rewriteValue386_Op386SETA(v, config)
+       case Op386SETAE:
+               return rewriteValue386_Op386SETAE(v, config)
+       case Op386SETB:
+               return rewriteValue386_Op386SETB(v, config)
+       case Op386SETBE:
+               return rewriteValue386_Op386SETBE(v, config)
+       case Op386SETEQ:
+               return rewriteValue386_Op386SETEQ(v, config)
+       case Op386SETG:
+               return rewriteValue386_Op386SETG(v, config)
+       case Op386SETGE:
+               return rewriteValue386_Op386SETGE(v, config)
+       case Op386SETL:
+               return rewriteValue386_Op386SETL(v, config)
+       case Op386SETLE:
+               return rewriteValue386_Op386SETLE(v, config)
+       case Op386SETNE:
+               return rewriteValue386_Op386SETNE(v, config)
+       case Op386SHLL:
+               return rewriteValue386_Op386SHLL(v, config)
+       case Op386SHRB:
+               return rewriteValue386_Op386SHRB(v, config)
+       case Op386SHRL:
+               return rewriteValue386_Op386SHRL(v, config)
+       case Op386SHRW:
+               return rewriteValue386_Op386SHRW(v, config)
+       case Op386SUBL:
+               return rewriteValue386_Op386SUBL(v, config)
+       case Op386SUBLcarry:
+               return rewriteValue386_Op386SUBLcarry(v, config)
+       case Op386SUBLconst:
+               return rewriteValue386_Op386SUBLconst(v, config)
+       case Op386XORL:
+               return rewriteValue386_Op386XORL(v, config)
+       case Op386XORLconst:
+               return rewriteValue386_Op386XORLconst(v, config)
        case OpAdd16:
                return rewriteValue386_OpAdd16(v, config)
        case OpAdd32:
@@ -48,18 +226,6 @@ func rewriteValue386(v *Value, config *Config) bool {
                return rewriteValue386_OpAndB(v, config)
        case OpBswap32:
                return rewriteValue386_OpBswap32(v, config)
-       case Op386CMPB:
-               return rewriteValue386_Op386CMPB(v, config)
-       case Op386CMPBconst:
-               return rewriteValue386_Op386CMPBconst(v, config)
-       case Op386CMPL:
-               return rewriteValue386_Op386CMPL(v, config)
-       case Op386CMPLconst:
-               return rewriteValue386_Op386CMPLconst(v, config)
-       case Op386CMPW:
-               return rewriteValue386_Op386CMPW(v, config)
-       case Op386CMPWconst:
-               return rewriteValue386_Op386CMPWconst(v, config)
        case OpClosureCall:
                return rewriteValue386_OpClosureCall(v, config)
        case OpCom16:
@@ -186,16 +352,6 @@ func rewriteValue386(v *Value, config *Config) bool {
                return rewriteValue386_OpIsNonNil(v, config)
        case OpIsSliceInBounds:
                return rewriteValue386_OpIsSliceInBounds(v, config)
-       case Op386LEAL:
-               return rewriteValue386_Op386LEAL(v, config)
-       case Op386LEAL1:
-               return rewriteValue386_Op386LEAL1(v, config)
-       case Op386LEAL2:
-               return rewriteValue386_Op386LEAL2(v, config)
-       case Op386LEAL4:
-               return rewriteValue386_Op386LEAL4(v, config)
-       case Op386LEAL8:
-               return rewriteValue386_Op386LEAL8(v, config)
        case OpLeq16:
                return rewriteValue386_OpLeq16(v, config)
        case OpLeq16U:
@@ -260,94 +416,6 @@ func rewriteValue386(v *Value, config *Config) bool {
                return rewriteValue386_OpLsh8x64(v, config)
        case OpLsh8x8:
                return rewriteValue386_OpLsh8x8(v, config)
-       case Op386MOVBLSX:
-               return rewriteValue386_Op386MOVBLSX(v, config)
-       case Op386MOVBLSXload:
-               return rewriteValue386_Op386MOVBLSXload(v, config)
-       case Op386MOVBLZX:
-               return rewriteValue386_Op386MOVBLZX(v, config)
-       case Op386MOVBload:
-               return rewriteValue386_Op386MOVBload(v, config)
-       case Op386MOVBloadidx1:
-               return rewriteValue386_Op386MOVBloadidx1(v, config)
-       case Op386MOVBstore:
-               return rewriteValue386_Op386MOVBstore(v, config)
-       case Op386MOVBstoreconst:
-               return rewriteValue386_Op386MOVBstoreconst(v, config)
-       case Op386MOVBstoreconstidx1:
-               return rewriteValue386_Op386MOVBstoreconstidx1(v, config)
-       case Op386MOVBstoreidx1:
-               return rewriteValue386_Op386MOVBstoreidx1(v, config)
-       case Op386MOVLload:
-               return rewriteValue386_Op386MOVLload(v, config)
-       case Op386MOVLloadidx1:
-               return rewriteValue386_Op386MOVLloadidx1(v, config)
-       case Op386MOVLloadidx4:
-               return rewriteValue386_Op386MOVLloadidx4(v, config)
-       case Op386MOVLstore:
-               return rewriteValue386_Op386MOVLstore(v, config)
-       case Op386MOVLstoreconst:
-               return rewriteValue386_Op386MOVLstoreconst(v, config)
-       case Op386MOVLstoreconstidx1:
-               return rewriteValue386_Op386MOVLstoreconstidx1(v, config)
-       case Op386MOVLstoreconstidx4:
-               return rewriteValue386_Op386MOVLstoreconstidx4(v, config)
-       case Op386MOVLstoreidx1:
-               return rewriteValue386_Op386MOVLstoreidx1(v, config)
-       case Op386MOVLstoreidx4:
-               return rewriteValue386_Op386MOVLstoreidx4(v, config)
-       case Op386MOVSDload:
-               return rewriteValue386_Op386MOVSDload(v, config)
-       case Op386MOVSDloadidx1:
-               return rewriteValue386_Op386MOVSDloadidx1(v, config)
-       case Op386MOVSDloadidx8:
-               return rewriteValue386_Op386MOVSDloadidx8(v, config)
-       case Op386MOVSDstore:
-               return rewriteValue386_Op386MOVSDstore(v, config)
-       case Op386MOVSDstoreidx1:
-               return rewriteValue386_Op386MOVSDstoreidx1(v, config)
-       case Op386MOVSDstoreidx8:
-               return rewriteValue386_Op386MOVSDstoreidx8(v, config)
-       case Op386MOVSSload:
-               return rewriteValue386_Op386MOVSSload(v, config)
-       case Op386MOVSSloadidx1:
-               return rewriteValue386_Op386MOVSSloadidx1(v, config)
-       case Op386MOVSSloadidx4:
-               return rewriteValue386_Op386MOVSSloadidx4(v, config)
-       case Op386MOVSSstore:
-               return rewriteValue386_Op386MOVSSstore(v, config)
-       case Op386MOVSSstoreidx1:
-               return rewriteValue386_Op386MOVSSstoreidx1(v, config)
-       case Op386MOVSSstoreidx4:
-               return rewriteValue386_Op386MOVSSstoreidx4(v, config)
-       case Op386MOVWLSX:
-               return rewriteValue386_Op386MOVWLSX(v, config)
-       case Op386MOVWLSXload:
-               return rewriteValue386_Op386MOVWLSXload(v, config)
-       case Op386MOVWLZX:
-               return rewriteValue386_Op386MOVWLZX(v, config)
-       case Op386MOVWload:
-               return rewriteValue386_Op386MOVWload(v, config)
-       case Op386MOVWloadidx1:
-               return rewriteValue386_Op386MOVWloadidx1(v, config)
-       case Op386MOVWloadidx2:
-               return rewriteValue386_Op386MOVWloadidx2(v, config)
-       case Op386MOVWstore:
-               return rewriteValue386_Op386MOVWstore(v, config)
-       case Op386MOVWstoreconst:
-               return rewriteValue386_Op386MOVWstoreconst(v, config)
-       case Op386MOVWstoreconstidx1:
-               return rewriteValue386_Op386MOVWstoreconstidx1(v, config)
-       case Op386MOVWstoreconstidx2:
-               return rewriteValue386_Op386MOVWstoreconstidx2(v, config)
-       case Op386MOVWstoreidx1:
-               return rewriteValue386_Op386MOVWstoreidx1(v, config)
-       case Op386MOVWstoreidx2:
-               return rewriteValue386_Op386MOVWstoreidx2(v, config)
-       case Op386MULL:
-               return rewriteValue386_Op386MULL(v, config)
-       case Op386MULLconst:
-               return rewriteValue386_Op386MULLconst(v, config)
        case OpMod16:
                return rewriteValue386_OpMod16(v, config)
        case OpMod16u:
@@ -374,10 +442,6 @@ func rewriteValue386(v *Value, config *Config) bool {
                return rewriteValue386_OpMul64F(v, config)
        case OpMul8:
                return rewriteValue386_OpMul8(v, config)
-       case Op386NEGL:
-               return rewriteValue386_Op386NEGL(v, config)
-       case Op386NOTL:
-               return rewriteValue386_Op386NOTL(v, config)
        case OpNeg16:
                return rewriteValue386_OpNeg16(v, config)
        case OpNeg32:
@@ -406,10 +470,6 @@ func rewriteValue386(v *Value, config *Config) bool {
                return rewriteValue386_OpNilCheck(v, config)
        case OpNot:
                return rewriteValue386_OpNot(v, config)
-       case Op386ORL:
-               return rewriteValue386_Op386ORL(v, config)
-       case Op386ORLconst:
-               return rewriteValue386_Op386ORLconst(v, config)
        case OpOffPtr:
                return rewriteValue386_OpOffPtr(v, config)
        case OpOr16:
@@ -420,12 +480,6 @@ func rewriteValue386(v *Value, config *Config) bool {
                return rewriteValue386_OpOr8(v, config)
        case OpOrB:
                return rewriteValue386_OpOrB(v, config)
-       case Op386ROLBconst:
-               return rewriteValue386_Op386ROLBconst(v, config)
-       case Op386ROLLconst:
-               return rewriteValue386_Op386ROLLconst(v, config)
-       case Op386ROLWconst:
-               return rewriteValue386_Op386ROLWconst(v, config)
        case OpRsh16Ux16:
                return rewriteValue386_OpRsh16Ux16(v, config)
        case OpRsh16Ux32:
@@ -474,56 +528,6 @@ func rewriteValue386(v *Value, config *Config) bool {
                return rewriteValue386_OpRsh8x64(v, config)
        case OpRsh8x8:
                return rewriteValue386_OpRsh8x8(v, config)
-       case Op386SARB:
-               return rewriteValue386_Op386SARB(v, config)
-       case Op386SARBconst:
-               return rewriteValue386_Op386SARBconst(v, config)
-       case Op386SARL:
-               return rewriteValue386_Op386SARL(v, config)
-       case Op386SARLconst:
-               return rewriteValue386_Op386SARLconst(v, config)
-       case Op386SARW:
-               return rewriteValue386_Op386SARW(v, config)
-       case Op386SARWconst:
-               return rewriteValue386_Op386SARWconst(v, config)
-       case Op386SBBL:
-               return rewriteValue386_Op386SBBL(v, config)
-       case Op386SBBLcarrymask:
-               return rewriteValue386_Op386SBBLcarrymask(v, config)
-       case Op386SETA:
-               return rewriteValue386_Op386SETA(v, config)
-       case Op386SETAE:
-               return rewriteValue386_Op386SETAE(v, config)
-       case Op386SETB:
-               return rewriteValue386_Op386SETB(v, config)
-       case Op386SETBE:
-               return rewriteValue386_Op386SETBE(v, config)
-       case Op386SETEQ:
-               return rewriteValue386_Op386SETEQ(v, config)
-       case Op386SETG:
-               return rewriteValue386_Op386SETG(v, config)
-       case Op386SETGE:
-               return rewriteValue386_Op386SETGE(v, config)
-       case Op386SETL:
-               return rewriteValue386_Op386SETL(v, config)
-       case Op386SETLE:
-               return rewriteValue386_Op386SETLE(v, config)
-       case Op386SETNE:
-               return rewriteValue386_Op386SETNE(v, config)
-       case Op386SHLL:
-               return rewriteValue386_Op386SHLL(v, config)
-       case Op386SHRB:
-               return rewriteValue386_Op386SHRB(v, config)
-       case Op386SHRL:
-               return rewriteValue386_Op386SHRL(v, config)
-       case Op386SHRW:
-               return rewriteValue386_Op386SHRW(v, config)
-       case Op386SUBL:
-               return rewriteValue386_Op386SUBL(v, config)
-       case Op386SUBLcarry:
-               return rewriteValue386_Op386SUBLcarry(v, config)
-       case Op386SUBLconst:
-               return rewriteValue386_Op386SUBLconst(v, config)
        case OpSignExt16to32:
                return rewriteValue386_OpSignExt16to32(v, config)
        case OpSignExt8to16:
@@ -560,10 +564,6 @@ func rewriteValue386(v *Value, config *Config) bool {
                return rewriteValue386_OpTrunc32to16(v, config)
        case OpTrunc32to8:
                return rewriteValue386_OpTrunc32to8(v, config)
-       case Op386XORL:
-               return rewriteValue386_Op386XORL(v, config)
-       case Op386XORLconst:
-               return rewriteValue386_Op386XORLconst(v, config)
        case OpXor16:
                return rewriteValue386_OpXor16(v, config)
        case OpXor32:
@@ -1181,330 +1181,120 @@ func rewriteValue386_Op386ANDLconst(v *Value, config *Config) bool {
        }
        return false
 }
-func rewriteValue386_OpAdd16(v *Value, config *Config) bool {
+func rewriteValue386_Op386CMPB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add16  x y)
+       // match: (CMPB x (MOVLconst [c]))
        // cond:
-       // result: (ADDL  x y)
+       // result: (CMPBconst x [int64(int8(c))])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ADDL)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(Op386CMPBconst)
+               v.AuxInt = int64(int8(c))
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpAdd32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32  x y)
+       // match: (CMPB (MOVLconst [c]) x)
        // cond:
-       // result: (ADDL  x y)
+       // result: (InvertFlags (CMPBconst x [int64(int8(c))]))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ADDL)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != Op386MOVLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(Op386InvertFlags)
+               v0 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+               v0.AuxInt = int64(int8(c))
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
+       return false
 }
-func rewriteValue386_OpAdd32F(v *Value, config *Config) bool {
+func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add32F x y)
-       // cond:
-       // result: (ADDSS x y)
+       // match: (CMPBconst (MOVLconst [x]) [y])
+       // cond: int8(x)==int8(y)
+       // result: (FlagEQ)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ADDSS)
-               v.AddArg(x)
-               v.AddArg(y)
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != Op386MOVLconst {
+                       break
+               }
+               x := v_0.AuxInt
+               if !(int8(x) == int8(y)) {
+                       break
+               }
+               v.reset(Op386FlagEQ)
                return true
        }
-}
-func rewriteValue386_OpAdd32carry(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32carry x y)
-       // cond:
-       // result: (ADDLcarry x y)
+       // match: (CMPBconst (MOVLconst [x]) [y])
+       // cond: int8(x)<int8(y) && uint8(x)<uint8(y)
+       // result: (FlagLT_ULT)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ADDLcarry)
-               v.AddArg(x)
-               v.AddArg(y)
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != Op386MOVLconst {
+                       break
+               }
+               x := v_0.AuxInt
+               if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
+                       break
+               }
+               v.reset(Op386FlagLT_ULT)
                return true
        }
-}
-func rewriteValue386_OpAdd32withcarry(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32withcarry x y c)
-       // cond:
-       // result: (ADCL x y c)
+       // match: (CMPBconst (MOVLconst [x]) [y])
+       // cond: int8(x)<int8(y) && uint8(x)>uint8(y)
+       // result: (FlagLT_UGT)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               c := v.Args[2]
-               v.reset(Op386ADCL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(c)
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != Op386MOVLconst {
+                       break
+               }
+               x := v_0.AuxInt
+               if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
+                       break
+               }
+               v.reset(Op386FlagLT_UGT)
                return true
        }
-}
-func rewriteValue386_OpAdd64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add64F x y)
-       // cond:
-       // result: (ADDSD x y)
+       // match: (CMPBconst (MOVLconst [x]) [y])
+       // cond: int8(x)>int8(y) && uint8(x)<uint8(y)
+       // result: (FlagGT_ULT)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ADDSD)
-               v.AddArg(x)
-               v.AddArg(y)
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != Op386MOVLconst {
+                       break
+               }
+               x := v_0.AuxInt
+               if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
+                       break
+               }
+               v.reset(Op386FlagGT_ULT)
                return true
        }
-}
-func rewriteValue386_OpAdd8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add8   x y)
-       // cond:
-       // result: (ADDL  x y)
+       // match: (CMPBconst (MOVLconst [x]) [y])
+       // cond: int8(x)>int8(y) && uint8(x)>uint8(y)
+       // result: (FlagGT_UGT)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ADDL)
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValue386_OpAddPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (AddPtr x y)
-       // cond:
-       // result: (ADDL  x y)
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ADDL)
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValue386_OpAddr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Addr {sym} base)
-       // cond:
-       // result: (LEAL {sym} base)
-       for {
-               sym := v.Aux
-               base := v.Args[0]
-               v.reset(Op386LEAL)
-               v.Aux = sym
-               v.AddArg(base)
-               return true
-       }
-}
-func rewriteValue386_OpAnd16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And16 x y)
-       // cond:
-       // result: (ANDL x y)
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValue386_OpAnd32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And32 x y)
-       // cond:
-       // result: (ANDL x y)
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValue386_OpAnd8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And8  x y)
-       // cond:
-       // result: (ANDL x y)
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValue386_OpAndB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (AndB x y)
-       // cond:
-       // result: (ANDL x y)
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValue386_OpBswap32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Bswap32 x)
-       // cond:
-       // result: (BSWAPL x)
-       for {
-               x := v.Args[0]
-               v.reset(Op386BSWAPL)
-               v.AddArg(x)
-               return true
-       }
-}
-func rewriteValue386_Op386CMPB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (CMPB x (MOVLconst [c]))
-       // cond:
-       // result: (CMPBconst x [int64(int8(c))])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(Op386CMPBconst)
-               v.AddArg(x)
-               v.AuxInt = int64(int8(c))
-               return true
-       }
-       // match: (CMPB (MOVLconst [c]) x)
-       // cond:
-       // result: (InvertFlags (CMPBconst x [int64(int8(c))]))
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(Op386InvertFlags)
-               v0 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-               v0.AddArg(x)
-               v0.AuxInt = int64(int8(c))
-               v.AddArg(v0)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (CMPBconst (MOVLconst [x]) [y])
-       // cond: int8(x)==int8(y)
-       // result: (FlagEQ)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int8(x) == int8(y)) {
-                       break
-               }
-               v.reset(Op386FlagEQ)
-               return true
-       }
-       // match: (CMPBconst (MOVLconst [x]) [y])
-       // cond: int8(x)<int8(y) && uint8(x)<uint8(y)
-       // result: (FlagLT_ULT)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
-                       break
-               }
-               v.reset(Op386FlagLT_ULT)
-               return true
-       }
-       // match: (CMPBconst (MOVLconst [x]) [y])
-       // cond: int8(x)<int8(y) && uint8(x)>uint8(y)
-       // result: (FlagLT_UGT)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
-                       break
-               }
-               v.reset(Op386FlagLT_UGT)
-               return true
-       }
-       // match: (CMPBconst (MOVLconst [x]) [y])
-       // cond: int8(x)>int8(y) && uint8(x)<uint8(y)
-       // result: (FlagGT_ULT)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               x := v_0.AuxInt
                y := v.AuxInt
-               if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
-                       break
-               }
-               v.reset(Op386FlagGT_ULT)
-               return true
-       }
-       // match: (CMPBconst (MOVLconst [x]) [y])
-       // cond: int8(x)>int8(y) && uint8(x)>uint8(y)
-       // result: (FlagGT_UGT)
-       for {
                v_0 := v.Args[0]
                if v_0.Op != Op386MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
                        break
                }
@@ -1515,12 +1305,12 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
        // cond: 0 <= int8(m) && int8(m) < int8(n)
        // result: (FlagLT_ULT)
        for {
+               n := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386ANDLconst {
                        break
                }
                m := v_0.AuxInt
-               n := v.AuxInt
                if !(0 <= int8(m) && int8(m) < int8(n)) {
                        break
                }
@@ -1531,15 +1321,15 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
        // cond:
        // result: (TESTB x y)
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
                v_0 := v.Args[0]
                if v_0.Op != Op386ANDL {
                        break
                }
                x := v_0.Args[0]
                y := v_0.Args[1]
-               if v.AuxInt != 0 {
-                       break
-               }
                v.reset(Op386TESTB)
                v.AddArg(x)
                v.AddArg(y)
@@ -1549,15 +1339,15 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
        // cond:
        // result: (TESTBconst [int64(int8(c))] x)
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
                v_0 := v.Args[0]
                if v_0.Op != Op386ANDLconst {
                        break
                }
                c := v_0.AuxInt
                x := v_0.Args[0]
-               if v.AuxInt != 0 {
-                       break
-               }
                v.reset(Op386TESTBconst)
                v.AuxInt = int64(int8(c))
                v.AddArg(x)
@@ -1567,10 +1357,10 @@ func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
        // cond:
        // result: (TESTB x x)
        for {
-               x := v.Args[0]
                if v.AuxInt != 0 {
                        break
                }
+               x := v.Args[0]
                v.reset(Op386TESTB)
                v.AddArg(x)
                v.AddArg(x)
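
The TESTB rewrites above also show the mechanical change running through this diff: the generated matcher now reads v.AuxInt before v.Args, so a rule like (CMPBconst (ANDL x y) [0]) -> (TESTB x y) rejects a nonzero constant before it loads any arguments. The rewrite itself is sound because comparing x&y against 0 derives ZF/SF from the same byte that TESTB computes; a standalone model (hypothetical helpers, not compiler code):

package main

import "fmt"

// cmpbZeroFlags models CMPB arg, 0: ZF/SF come from arg itself.
func cmpbZeroFlags(arg int64) (zf, sf bool) {
	b := int8(arg)
	return b == 0, b < 0
}

// testbFlags models TESTB x, y: ZF/SF come from x&y.
func testbFlags(x, y int64) (zf, sf bool) {
	b := int8(x) & int8(y)
	return b == 0, b < 0
}

func main() {
	x, y := int64(0xF0), int64(0x81)
	z1, s1 := cmpbZeroFlags(x & y)
	z2, s2 := testbFlags(x, y)
	fmt.Println(z1 == z2, s1 == s2) // true true
}
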
@@ -1592,8 +1382,8 @@ func rewriteValue386_Op386CMPL(v *Value, config *Config) bool {
                }
                c := v_1.AuxInt
                v.reset(Op386CMPLconst)
-               v.AddArg(x)
                v.AuxInt = c
+               v.AddArg(x)
                return true
        }
        // match: (CMPL (MOVLconst [c]) x)
@@ -1608,8 +1398,8 @@ func rewriteValue386_Op386CMPL(v *Value, config *Config) bool {
                x := v.Args[1]
                v.reset(Op386InvertFlags)
                v0 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-               v0.AddArg(x)
                v0.AuxInt = c
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
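
CMPL is not commutative, so when the constant sits on the left the rule above emits CMPLconst with the operands swapped and wraps it in InvertFlags, which later flag consumers read with the condition reversed. A tiny signed-order model of that identity (sign/cmp/invert are illustrative names only; the unsigned order inverts the same way):

package main

import "fmt"

// sign stands in for a flags value: -1, 0, or 1.
func sign(d int64) int {
	if d < 0 {
		return -1
	}
	if d > 0 {
		return 1
	}
	return 0
}

func cmp(x, y int32) int { return sign(int64(x) - int64(y)) }

// invert models Op386InvertFlags: consumers swap the sense of the compare.
func invert(f int) int { return -f }

func main() {
	c, x := int32(7), int32(42)
	fmt.Println(cmp(c, x) == invert(cmp(x, c))) // true
}
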
@@ -1622,12 +1412,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
        // cond: int32(x)==int32(y)
        // result: (FlagEQ)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int32(x) == int32(y)) {
                        break
                }
@@ -1638,12 +1428,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
        // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
        // result: (FlagLT_ULT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
                        break
                }
@@ -1654,12 +1444,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
        // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
        // result: (FlagLT_UGT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
                        break
                }
@@ -1670,12 +1460,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
        // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
        // result: (FlagGT_ULT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
                        break
                }
@@ -1686,12 +1476,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
        // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
        // result: (FlagGT_UGT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
                        break
                }
@@ -1702,12 +1492,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
        // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
        // result: (FlagLT_ULT)
        for {
+               n := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386SHRLconst {
                        break
                }
                c := v_0.AuxInt
-               n := v.AuxInt
                if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
                        break
                }
@@ -1718,12 +1508,12 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
        // cond: 0 <= int32(m) && int32(m) < int32(n)
        // result: (FlagLT_ULT)
        for {
+               n := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386ANDLconst {
                        break
                }
                m := v_0.AuxInt
-               n := v.AuxInt
                if !(0 <= int32(m) && int32(m) < int32(n)) {
                        break
                }
@@ -1734,15 +1524,15 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
        // cond:
        // result: (TESTL x y)
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
                v_0 := v.Args[0]
                if v_0.Op != Op386ANDL {
                        break
                }
                x := v_0.Args[0]
                y := v_0.Args[1]
-               if v.AuxInt != 0 {
-                       break
-               }
                v.reset(Op386TESTL)
                v.AddArg(x)
                v.AddArg(y)
@@ -1752,15 +1542,15 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
        // cond:
        // result: (TESTLconst [c] x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ANDLconst {
+               if v.AuxInt != 0 {
                        break
                }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               if v.AuxInt != 0 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ANDLconst {
                        break
                }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
                v.reset(Op386TESTLconst)
                v.AuxInt = c
                v.AddArg(x)
@@ -1770,10 +1560,10 @@ func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
        // cond:
        // result: (TESTL x x)
        for {
-               x := v.Args[0]
                if v.AuxInt != 0 {
                        break
                }
+               x := v.Args[0]
                v.reset(Op386TESTL)
                v.AddArg(x)
                v.AddArg(x)
@@ -1795,8 +1585,8 @@ func rewriteValue386_Op386CMPW(v *Value, config *Config) bool {
                }
                c := v_1.AuxInt
                v.reset(Op386CMPWconst)
-               v.AddArg(x)
                v.AuxInt = int64(int16(c))
+               v.AddArg(x)
                return true
        }
        // match: (CMPW (MOVLconst [c]) x)
@@ -1811,8 +1601,8 @@ func rewriteValue386_Op386CMPW(v *Value, config *Config) bool {
                x := v.Args[1]
                v.reset(Op386InvertFlags)
                v0 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-               v0.AddArg(x)
                v0.AuxInt = int64(int16(c))
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
@@ -1825,12 +1615,12 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
        // cond: int16(x)==int16(y)
        // result: (FlagEQ)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int16(x) == int16(y)) {
                        break
                }
@@ -1841,12 +1631,12 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
        // cond: int16(x)<int16(y) && uint16(x)<uint16(y)
        // result: (FlagLT_ULT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
                        break
                }
@@ -1857,12 +1647,12 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
        // cond: int16(x)<int16(y) && uint16(x)>uint16(y)
        // result: (FlagLT_UGT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
                        break
                }
@@ -1873,12 +1663,12 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
        // cond: int16(x)>int16(y) && uint16(x)<uint16(y)
        // result: (FlagGT_ULT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
                        break
                }
@@ -1889,12 +1679,12 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
        // cond: int16(x)>int16(y) && uint16(x)>uint16(y)
        // result: (FlagGT_UGT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
                        break
                }
@@ -1905,12 +1695,12 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
        // cond: 0 <= int16(m) && int16(m) < int16(n)
        // result: (FlagLT_ULT)
        for {
+               n := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != Op386ANDLconst {
                        break
                }
                m := v_0.AuxInt
-               n := v.AuxInt
                if !(0 <= int16(m) && int16(m) < int16(n)) {
                        break
                }
@@ -1921,15 +1711,15 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
        // cond:
        // result: (TESTW x y)
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
                v_0 := v.Args[0]
                if v_0.Op != Op386ANDL {
                        break
                }
                x := v_0.Args[0]
                y := v_0.Args[1]
-               if v.AuxInt != 0 {
-                       break
-               }
                v.reset(Op386TESTW)
                v.AddArg(x)
                v.AddArg(y)
@@ -1939,15 +1729,15 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
        // cond:
        // result: (TESTWconst [int64(int16(c))] x)
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
                v_0 := v.Args[0]
                if v_0.Op != Op386ANDLconst {
                        break
                }
                c := v_0.AuxInt
                x := v_0.Args[0]
-               if v.AuxInt != 0 {
-                       break
-               }
                v.reset(Op386TESTWconst)
                v.AuxInt = int64(int16(c))
                v.AddArg(x)
@@ -1957,10 +1747,10 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
        // cond:
        // result: (TESTW x x)
        for {
-               x := v.Args[0]
                if v.AuxInt != 0 {
                        break
                }
+               x := v.Args[0]
                v.reset(Op386TESTW)
                v.AddArg(x)
                v.AddArg(x)
@@ -1968,2581 +1758,3163 @@ func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
        }
        return false
 }
-func rewriteValue386_OpClosureCall(v *Value, config *Config) bool {
+func rewriteValue386_Op386LEAL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ClosureCall [argwid] entry closure mem)
-       // cond:
-       // result: (CALLclosure [argwid] entry closure mem)
+       // match: (LEAL [c] {s} (ADDLconst [d] x))
+       // cond: is32Bit(c+d)
+       // result: (LEAL [c+d] {s} x)
        for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               closure := v.Args[1]
-               mem := v.Args[2]
-               v.reset(Op386CALLclosure)
-               v.AuxInt = argwid
-               v.AddArg(entry)
-               v.AddArg(closure)
-               v.AddArg(mem)
+               c := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(is32Bit(c + d)) {
+                       break
+               }
+               v.reset(Op386LEAL)
+               v.AuxInt = c + d
+               v.Aux = s
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValue386_OpCom16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com16 x)
-       // cond:
-       // result: (NOTL x)
+       // match: (LEAL [c] {s} (ADDL x y))
+       // cond: x.Op != OpSB && y.Op != OpSB
+       // result: (LEAL1 [c] {s} x y)
        for {
-               x := v.Args[0]
-               v.reset(Op386NOTL)
+               c := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDL {
+                       break
+               }
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               if !(x.Op != OpSB && y.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL1)
+               v.AuxInt = c
+               v.Aux = s
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpCom32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com32 x)
-       // cond:
-       // result: (NOTL x)
+       // match: (LEAL [off1] {sym1} (LEAL [off2] {sym2} x))
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
        for {
-               x := v.Args[0]
-               v.reset(Op386NOTL)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386LEAL)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(x)
                return true
        }
-}
-func rewriteValue386_OpCom8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com8  x)
-       // cond:
-       // result: (NOTL x)
+       // match: (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y))
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
-               x := v.Args[0]
-               v.reset(Op386NOTL)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL1 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386LEAL1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpConst16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const16  [val])
-       // cond:
-       // result: (MOVLconst [val])
+       // match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y))
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
-               val := v.AuxInt
-               v.reset(Op386MOVLconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL2 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386LEAL2)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpConst32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const32  [val])
-       // cond:
-       // result: (MOVLconst [val])
+       // match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y))
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
-               val := v.AuxInt
-               v.reset(Op386MOVLconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL4 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386LEAL4)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpConst32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const32F [val])
-       // cond:
-       // result: (MOVSSconst [val])
+       // match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y))
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
-               val := v.AuxInt
-               v.reset(Op386MOVSSconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL8 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386LEAL8)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
+       return false
 }
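
Every LEAL fold above is guarded by is32Bit(off1+off2) (plus canMergeSym when two symbols are involved): the combined displacement must still fit the 32-bit field of the address mode, or the rule breaks out. A standalone sketch of that guard, assuming the helper's usual fits-in-int32 meaning:

package main

import "fmt"

// is32Bit mirrors the guard used by the LEAL rules: n fits in a signed
// 32-bit integer. (Standalone sketch; the compiler has its own helper.)
func is32Bit(n int64) bool { return n == int64(int32(n)) }

func main() {
	c, d := int64(1)<<30, int64(1)<<30
	fmt.Println(is32Bit(c + 8)) // true: (LEAL [c] (ADDLconst [8] x)) folds
	fmt.Println(is32Bit(c + d)) // false: 2^31 overflows, the rule breaks out
}
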
-func rewriteValue386_OpConst64F(v *Value, config *Config) bool {
+func rewriteValue386_Op386LEAL1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Const64F [val])
-       // cond:
-       // result: (MOVSDconst [val])
-       for {
-               val := v.AuxInt
-               v.reset(Op386MOVSDconst)
-               v.AuxInt = val
-               return true
-       }
-}
-func rewriteValue386_OpConst8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const8   [val])
-       // cond:
-       // result: (MOVLconst [val])
-       for {
-               val := v.AuxInt
-               v.reset(Op386MOVLconst)
-               v.AuxInt = val
-               return true
-       }
-}
-func rewriteValue386_OpConstBool(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ConstBool [b])
-       // cond:
-       // result: (MOVLconst [b])
+       // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
+       // cond: is32Bit(c+d)   && x.Op != OpSB
+       // result: (LEAL1 [c+d] {s} x y)
        for {
-               b := v.AuxInt
-               v.reset(Op386MOVLconst)
-               v.AuxInt = b
+               c := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               y := v.Args[1]
+               if !(is32Bit(c+d) && x.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL1)
+               v.AuxInt = c + d
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpConstNil(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ConstNil)
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (LEAL1 [c] {s} x (ADDLconst [d] y))
+       // cond: is32Bit(c+d)   && y.Op != OpSB
+       // result: (LEAL1 [c+d] {s} x y)
        for {
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               c := v.AuxInt
+               s := v.Aux
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_1.AuxInt
+               y := v_1.Args[0]
+               if !(is32Bit(c+d) && y.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL1)
+               v.AuxInt = c + d
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpConvert(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Convert <t> x mem)
+       // match: (LEAL1 [c] {s} x (SHLLconst [1] y))
        // cond:
-       // result: (MOVLconvert <t> x mem)
+       // result: (LEAL2 [c] {s} x y)
        for {
-               t := v.Type
+               c := v.AuxInt
+               s := v.Aux
                x := v.Args[0]
-               mem := v.Args[1]
-               v.reset(Op386MOVLconvert)
-               v.Type = t
+               v_1 := v.Args[1]
+               if v_1.Op != Op386SHLLconst {
+                       break
+               }
+               if v_1.AuxInt != 1 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(Op386LEAL2)
+               v.AuxInt = c
+               v.Aux = s
                v.AddArg(x)
-               v.AddArg(mem)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpCvt32Fto32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Fto32 x)
+       // match: (LEAL1 [c] {s} (SHLLconst [1] x) y)
        // cond:
-       // result: (CVTTSS2SL x)
+       // result: (LEAL2 [c] {s} y x)
        for {
-               x := v.Args[0]
-               v.reset(Op386CVTTSS2SL)
+               c := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386SHLLconst {
+                       break
+               }
+               if v_0.AuxInt != 1 {
+                       break
+               }
+               x := v_0.Args[0]
+               y := v.Args[1]
+               v.reset(Op386LEAL2)
+               v.AuxInt = c
+               v.Aux = s
+               v.AddArg(y)
                v.AddArg(x)
                return true
        }
-}
-func rewriteValue386_OpCvt32Fto64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Fto64F x)
+       // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
        // cond:
-       // result: (CVTSS2SD x)
+       // result: (LEAL4 [c] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                x := v.Args[0]
-               v.reset(Op386CVTSS2SD)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386SHLLconst {
+                       break
+               }
+               if v_1.AuxInt != 2 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(Op386LEAL4)
+               v.AuxInt = c
+               v.Aux = s
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpCvt32to32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32to32F x)
+       // match: (LEAL1 [c] {s} (SHLLconst [2] x) y)
        // cond:
-       // result: (CVTSL2SS x)
+       // result: (LEAL4 [c] {s} y x)
        for {
-               x := v.Args[0]
-               v.reset(Op386CVTSL2SS)
+               c := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386SHLLconst {
+                       break
+               }
+               if v_0.AuxInt != 2 {
+                       break
+               }
+               x := v_0.Args[0]
+               y := v.Args[1]
+               v.reset(Op386LEAL4)
+               v.AuxInt = c
+               v.Aux = s
+               v.AddArg(y)
                v.AddArg(x)
                return true
        }
-}
-func rewriteValue386_OpCvt32to64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32to64F x)
+       // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
        // cond:
-       // result: (CVTSL2SD x)
+       // result: (LEAL8 [c] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                x := v.Args[0]
-               v.reset(Op386CVTSL2SD)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386SHLLconst {
+                       break
+               }
+               if v_1.AuxInt != 3 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(Op386LEAL8)
+               v.AuxInt = c
+               v.Aux = s
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpCvt64Fto32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto32 x)
+       // match: (LEAL1 [c] {s} (SHLLconst [3] x) y)
        // cond:
-       // result: (CVTTSD2SL x)
+       // result: (LEAL8 [c] {s} y x)
        for {
-               x := v.Args[0]
-               v.reset(Op386CVTTSD2SL)
+               c := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386SHLLconst {
+                       break
+               }
+               if v_0.AuxInt != 3 {
+                       break
+               }
+               x := v_0.Args[0]
+               y := v.Args[1]
+               v.reset(Op386LEAL8)
+               v.AuxInt = c
+               v.Aux = s
+               v.AddArg(y)
                v.AddArg(x)
                return true
        }
-}
-func rewriteValue386_OpCvt64Fto32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto32F x)
-       // cond:
-       // result: (CVTSD2SS x)
+       // match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+       // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
-               x := v.Args[0]
-               v.reset(Op386CVTSD2SS)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               y := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpDeferCall(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (DeferCall [argwid] mem)
-       // cond:
-       // result: (CALLdefer [argwid] mem)
+       // match: (LEAL1 [off1] {sym1} x (LEAL [off2] {sym2} y))
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
+       // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
-               argwid := v.AuxInt
-               mem := v.Args[0]
-               v.reset(Op386CALLdefer)
-               v.AuxInt = argwid
-               v.AddArg(mem)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386LEAL {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               y := v_1.Args[0]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
+       return false
 }
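
The SHLLconst cases in LEAL1 above turn a shifted index into a scaled address mode: x + (y<<1) is x + 2*y, which LEAL2 encodes directly, and likewise shifts by 2 and 3 become LEAL4 and LEAL8. A quick standalone check of that arithmetic (leal1/lealN are illustrative names):

package main

import "fmt"

// leal1 models the unscaled mode c + x + y; lealN models the scaled modes
// c + x + n*y that LEAL2/LEAL4/LEAL8 encode for n = 2, 4, 8.
func leal1(c, x, y uint32) uint32 { return c + x + y }

func lealN(c, x, y, n uint32) uint32 { return c + x + n*y }

func main() {
	c, x, y := uint32(16), uint32(1000), uint32(3)
	fmt.Println(leal1(c, x, y<<1) == lealN(c, x, y, 2)) // true
	fmt.Println(leal1(c, x, y<<3) == lealN(c, x, y, 8)) // true
}
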
-func rewriteValue386_OpDiv16(v *Value, config *Config) bool {
+func rewriteValue386_Op386LEAL2(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div16  x y)
-       // cond:
-       // result: (DIVW  x y)
+       // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
+       // cond: is32Bit(c+d)   && x.Op != OpSB
+       // result: (LEAL2 [c+d] {s} x y)
        for {
-               x := v.Args[0]
+               c := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
                y := v.Args[1]
-               v.reset(Op386DIVW)
+               if !(is32Bit(c+d) && x.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL2)
+               v.AuxInt = c + d
+               v.Aux = s
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpDiv16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div16u x y)
-       // cond:
-       // result: (DIVWU x y)
+       // match: (LEAL2 [c] {s} x (ADDLconst [d] y))
+       // cond: is32Bit(c+2*d) && y.Op != OpSB
+       // result: (LEAL2 [c+2*d] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386DIVWU)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_1.AuxInt
+               y := v_1.Args[0]
+               if !(is32Bit(c+2*d) && y.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL2)
+               v.AuxInt = c + 2*d
+               v.Aux = s
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpDiv32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32  x y)
+       // match: (LEAL2 [c] {s} x (SHLLconst [1] y))
        // cond:
-       // result: (DIVL  x y)
+       // result: (LEAL4 [c] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386DIVL)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386SHLLconst {
+                       break
+               }
+               if v_1.AuxInt != 1 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(Op386LEAL4)
+               v.AuxInt = c
+               v.Aux = s
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpDiv32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32F x y)
+       // match: (LEAL2 [c] {s} x (SHLLconst [2] y))
        // cond:
-       // result: (DIVSS x y)
+       // result: (LEAL8 [c] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386DIVSS)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386SHLLconst {
+                       break
+               }
+               if v_1.AuxInt != 2 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(Op386LEAL8)
+               v.AuxInt = c
+               v.Aux = s
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpDiv32u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32u x y)
-       // cond:
-       // result: (DIVLU x y)
+       // match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+       // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
-               x := v.Args[0]
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
                y := v.Args[1]
-               v.reset(Op386DIVLU)
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL2)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValue386_OpDiv64F(v *Value, config *Config) bool {
+func rewriteValue386_Op386LEAL4(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div64F x y)
-       // cond:
-       // result: (DIVSD x y)
+       // match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
+       // cond: is32Bit(c+d)   && x.Op != OpSB
+       // result: (LEAL4 [c+d] {s} x y)
        for {
-               x := v.Args[0]
+               c := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
                y := v.Args[1]
-               v.reset(Op386DIVSD)
+               if !(is32Bit(c+d) && x.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL4)
+               v.AuxInt = c + d
+               v.Aux = s
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpDiv8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div8   x y)
-       // cond:
-       // result: (DIVW  (SignExt8to16 x) (SignExt8to16 y))
+       // match: (LEAL4 [c] {s} x (ADDLconst [d] y))
+       // cond: is32Bit(c+4*d) && y.Op != OpSB
+       // result: (LEAL4 [c+4*d] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386DIVW)
-               v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_1.AuxInt
+               y := v_1.Args[0]
+               if !(is32Bit(c+4*d) && y.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL4)
+               v.AuxInt = c + 4*d
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpDiv8u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div8u  x y)
+       // match: (LEAL4 [c] {s} x (SHLLconst [1] y))
        // cond:
-       // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+       // result: (LEAL8 [c] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386DIVWU)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386SHLLconst {
+                       break
+               }
+               if v_1.AuxInt != 1 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(Op386LEAL8)
+               v.AuxInt = c
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpEq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq16  x y)
-       // cond:
-       // result: (SETEQ (CMPW x y))
+       // match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+       // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETEQ)
-               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               y := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL4)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValue386_OpEq32(v *Value, config *Config) bool {
+func rewriteValue386_Op386LEAL8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq32  x y)
-       // cond:
-       // result: (SETEQ (CMPL x y))
+       // match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
+       // cond: is32Bit(c+d)   && x.Op != OpSB
+       // result: (LEAL8 [c+d] {s} x y)
        for {
-               x := v.Args[0]
+               c := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
                y := v.Args[1]
-               v.reset(Op386SETEQ)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if !(is32Bit(c+d) && x.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL8)
+               v.AuxInt = c + d
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpEq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq32F x y)
-       // cond:
-       // result: (SETEQF (UCOMISS x y))
+       // match: (LEAL8 [c] {s} x (ADDLconst [d] y))
+       // cond: is32Bit(c+8*d) && y.Op != OpSB
+       // result: (LEAL8 [c+8*d] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETEQF)
-               v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_1.AuxInt
+               y := v_1.Args[0]
+               if !(is32Bit(c+8*d) && y.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL8)
+               v.AuxInt = c + 8*d
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpEq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq64F x y)
-       // cond:
-       // result: (SETEQF (UCOMISD x y))
+       // match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+       // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
-               x := v.Args[0]
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
                y := v.Args[1]
-               v.reset(Op386SETEQF)
-               v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386LEAL8)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValue386_OpEq8(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBLSX(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq8   x y)
-       // cond:
-       // result: (SETEQ (CMPB x y))
+       // match: (MOVBLSX x:(MOVBload [off] {sym} ptr mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETEQ)
-               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               if x.Op != Op386MOVBload {
+                       break
+               }
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               mem := x.Args[1]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               b = x.Block
+               v0 := b.NewValue0(v.Line, Op386MOVBLSXload, v.Type)
+               v.reset(OpCopy)
                v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpEqB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (EqB   x y)
-       // cond:
-       // result: (SETEQ (CMPB x y))
+       // match: (MOVBLSX (ANDLconst [c] x))
+       // cond: c & 0x80 == 0
+       // result: (ANDLconst [c & 0x7f] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETEQ)
-               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ANDLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(c&0x80 == 0) {
+                       break
+               }
+               v.reset(Op386ANDLconst)
+               v.AuxInt = c & 0x7f
+               v.AddArg(x)
                return true
        }
+       return false
 }
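
The ANDLconst case in MOVBLSX is the subtle one: it requires c & 0x80 == 0, because only then is the byte's sign bit known to be clear, making sign-extension a no-op and letting the mask move to 32 bits as c & 0x7f. A standalone model:

package main

import "fmt"

// movblsx models MOVBLSX: sign-extend the low byte to 32 bits.
func movblsx(v int32) int32 { return int32(int8(v)) }

func main() {
	c := int32(0x7c) // c&0x80 == 0, so the rule applies
	for _, x := range []int32{0x12345678, -1, 0x80} {
		// (MOVBLSX (ANDLconst [c] x)) == (ANDLconst [c & 0x7f] x)
		fmt.Println(movblsx(x&c) == x&(c&0x7f)) // always true
	}
}
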
-func rewriteValue386_OpEqPtr(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBLSXload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (EqPtr x y)
-       // cond:
-       // result: (SETEQ (CMPL x y))
+       // match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETEQ)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVBLSXload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValue386_OpGeq16(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBLZX(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq16  x y)
-       // cond:
-       // result: (SETGE (CMPW x y))
+       // match: (MOVBLZX x:(MOVBload [off] {sym} ptr mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETGE)
-               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               if x.Op != Op386MOVBload {
+                       break
+               }
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               mem := x.Args[1]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               b = x.Block
+               v0 := b.NewValue0(v.Line, Op386MOVBload, v.Type)
+               v.reset(OpCopy)
                v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGeq16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq16U x y)
-       // cond:
-       // result: (SETAE (CMPW x y))
+       // match: (MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETAE)
-               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               if x.Op != Op386MOVBloadidx1 {
+                       break
+               }
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               idx := x.Args[1]
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               b = x.Block
+               v0 := b.NewValue0(v.Line, Op386MOVBloadidx1, v.Type)
+               v.reset(OpCopy)
                v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(idx)
+               v0.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGeq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq32  x y)
+       // match: (MOVBLZX (ANDLconst [c] x))
        // cond:
-       // result: (SETGE (CMPL x y))
+       // result: (ANDLconst [c & 0xff] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETGE)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ANDLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(Op386ANDLconst)
+               v.AuxInt = c & 0xff
+               v.AddArg(x)
                return true
        }
+       return false
 }
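
By contrast, MOVBLZX needs no condition on its ANDLconst case: zero-extending the low byte is exactly a 0xff mask, so it always merges into the constant. A standalone model:

package main

import "fmt"

// movblzx models MOVBLZX: zero-extend the low byte to 32 bits.
func movblzx(v uint32) uint32 { return uint32(uint8(v)) }

func main() {
	c, x := uint32(0x1f0f), uint32(0xdeadbeef)
	// (MOVBLZX (ANDLconst [c] x)) == (ANDLconst [c & 0xff] x)
	fmt.Println(movblzx(x&c) == x&(c&0xff)) // true
}
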
-func rewriteValue386_OpGeq32F(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq32F x y)
-       // cond:
-       // result: (SETGEF (UCOMISS x y))
+       // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETGEF)
-               v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVBstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValue386_OpGeq32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq32U x y)
-       // cond:
-       // result: (SETAE (CMPL x y))
+       // match: (MOVBload  [off1] {sym} (ADDLconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVBload  [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETAE)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(Op386MOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGeq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq64F x y)
-       // cond:
-       // result: (SETGEF (UCOMISD x y))
+       // match: (MOVBload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETGEF)
-               v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGeq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq8   x y)
-       // cond:
-       // result: (SETGE (CMPB x y))
+       // match: (MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETGE)
-               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL1 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVBloadidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGeq8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq8U  x y)
-       // cond:
-       // result: (SETAE (CMPB x y))
+       // match: (MOVBload [off] {sym} (ADDL ptr idx) mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVBloadidx1 [off] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETAE)
-               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDL {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386MOVBloadidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
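
The ADDLconst and LEAL rules above fold a constant address adjustment into the load's displacement, each guarded by is32Bit(off1+off2) so the merged offset still fits in a signed 32-bit displacement field. A minimal standalone sketch of that guard in plain Go (not compiler source; it assumes is32Bit means "representable as int32"):

	package main

	import "fmt"

	// is32Bit reports whether n is representable as a signed 32-bit integer.
	func is32Bit(n int64) bool { return n == int64(int32(n)) }

	func main() {
		off1 := int64(1 << 30)
		off2 := int64(1 << 30)
		fmt.Println(is32Bit(off1), is32Bit(off2)) // true true: each offset fits on its own
		fmt.Println(is32Bit(off1 + off2))         // false: the sum overflows int32, so the rule breaks out
	}
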
-func rewriteValue386_OpGetClosurePtr(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBloadidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (GetClosurePtr)
+       // match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
        // cond:
-       // result: (LoweredGetClosurePtr)
+       // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               v.reset(Op386LoweredGetClosurePtr)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(Op386MOVBloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGetG(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (GetG mem)
+       // match: (MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
        // cond:
-       // result: (LoweredGetG mem)
+       // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               mem := v.Args[0]
-               v.reset(Op386LoweredGetG)
-               v.AddArg(mem)
-               return true
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVBloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
+               return true
        }
+       return false
 }
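
With an unscaled index the effective address is ptr + idx + AuxInt, so a constant added to either operand folds into AuxInt the same way; that symmetry is why the function above carries one ADDLconst rule for the pointer and a mirror-image one for the index. A quick arithmetic check (standalone Go, illustrative values only):

	package main

	import "fmt"

	func main() {
		ptr, idx, c, d := int64(0x1000), int64(24), int64(8), int64(3)
		folded := ptr + idx + (c + d)  // result form: AuxInt absorbs d
		fromPtr := (ptr + d) + idx + c // d attached to the pointer
		fromIdx := ptr + (idx + d) + c // d attached to the index
		fmt.Println(folded == fromPtr, folded == fromIdx) // true true
	}
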
-func rewriteValue386_OpGoCall(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (GoCall [argwid] mem)
+       // match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem)
        // cond:
-       // result: (CALLgo [argwid] mem)
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               argwid := v.AuxInt
-               mem := v.Args[0]
-               v.reset(Op386CALLgo)
-               v.AuxInt = argwid
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVBLSX {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
                v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGreater16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater16  x y)
+       // match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem)
        // cond:
-       // result: (SETG (CMPW x y))
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETG)
-               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVBLZX {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGreater16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater16U x y)
-       // cond:
-       // result: (SETA (CMPW x y))
+       // match: (MOVBstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVBstore  [off1+off2] {sym} ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETA)
-               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(Op386MOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGreater32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater32  x y)
-       // cond:
-       // result: (SETG (CMPL x y))
+       // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
+       // cond: validOff(off)
+       // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETG)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               if !(validOff(off)) {
+                       break
+               }
+               v.reset(Op386MOVBstoreconst)
+               v.AuxInt = makeValAndOff(int64(int8(c)), off)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGreater32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater32F x y)
-       // cond:
-       // result: (SETGF (UCOMISS x y))
+       // match: (MOVBstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETGF)
-               v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGreater32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater32U x y)
-       // cond:
-       // result: (SETA (CMPL x y))
+       // match: (MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETA)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL1 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVBstoreidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGreater64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater64F x y)
-       // cond:
-       // result: (SETGF (UCOMISD x y))
+       // match: (MOVBstore [off] {sym} (ADDL ptr idx) val mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETGF)
-               v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDL {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386MOVBstoreidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGreater8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater8   x y)
-       // cond:
-       // result: (SETG (CMPB x y))
+       // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVWstore [i-1] {s} p w mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETG)
-               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386SHRLconst {
+                       break
+               }
+               if v_1.AuxInt != 8 {
+                       break
+               }
+               w := v_1.Args[0]
+               x := v.Args[2]
+               if x.Op != Op386MOVBstore {
+                       break
+               }
+               if x.AuxInt != i-1 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if w != x.Args[1] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVWstore)
+               v.AuxInt = i - 1
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(w)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpGreater8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater8U  x y)
-       // cond:
-       // result: (SETA (CMPB x y))
+       // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVWstore [i-1] {s} p w0 mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETA)
-               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386SHRLconst {
+                       break
+               }
+               j := v_1.AuxInt
+               w := v_1.Args[0]
+               x := v.Args[2]
+               if x.Op != Op386MOVBstore {
+                       break
+               }
+               if x.AuxInt != i-1 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               w0 := x.Args[1]
+               if w0.Op != Op386SHRLconst {
+                       break
+               }
+               if w0.AuxInt != j-8 {
+                       break
+               }
+               if w != w0.Args[0] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVWstore)
+               v.AuxInt = i - 1
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(w0)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
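
The last two rules above merge a pair of adjacent byte stores of w and w>>8 into a single 16-bit store, which is only sound because 386 is little-endian and because x.Uses == 1 guarantees the narrower store has no other consumers. The memory effect can be checked in isolation (standalone Go; the comments map each buffer write to the op it stands in for):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		w := uint32(0xBEEF)

		a := make([]byte, 2)
		a[0] = byte(w)      // MOVBstore [i-1] {s} p w
		a[1] = byte(w >> 8) // MOVBstore [i]   {s} p (SHRLconst [8] w)

		b := make([]byte, 2)
		binary.LittleEndian.PutUint16(b, uint16(w)) // MOVWstore [i-1] {s} p w

		fmt.Println(a, b) // [239 190] [239 190]: identical memory contents
	}
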
-func rewriteValue386_OpHmul16(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBstoreconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul16  x y)
-       // cond:
-       // result: (HMULW  x y)
+       // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+       // cond: ValAndOff(sc).canAdd(off)
+       // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386HMULW)
-               v.AddArg(x)
-               v.AddArg(y)
+               sc := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               off := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(ValAndOff(sc).canAdd(off)) {
+                       break
+               }
+               v.reset(Op386MOVBstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
+               v.Aux = s
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpHmul16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul16u x y)
-       // cond:
-       // result: (HMULWU x y)
+       // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+       // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386HMULWU)
-               v.AddArg(x)
-               v.AddArg(y)
+               sc := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
+                       break
+               }
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+                       break
+               }
+               v.reset(Op386MOVBstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpHmul32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul32  x y)
-       // cond:
-       // result: (HMULL  x y)
+       // match: (MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
+       // cond: canMergeSym(sym1, sym2)
+       // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386HMULL)
-               v.AddArg(x)
-               v.AddArg(y)
+               x := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL1 {
+                       break
+               }
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVBstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpHmul32u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul32u x y)
+       // match: (MOVBstoreconst [x] {sym} (ADDL ptr idx) mem)
        // cond:
-       // result: (HMULLU x y)
+       // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386HMULLU)
-               v.AddArg(x)
-               v.AddArg(y)
+               x := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDL {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               v.reset(Op386MOVBstoreconstidx1)
+               v.AuxInt = x
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpHmul8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul8   x y)
-       // cond:
-       // result: (HMULB  x y)
+       // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386HMULB)
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValue386_OpHmul8u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul8u  x y)
-       // cond:
-       // result: (HMULBU x y)
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386HMULBU)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               x := v.Args[1]
+               if x.Op != Op386MOVBstoreconst {
+                       break
+               }
+               a := x.AuxInt
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               mem := x.Args[1]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVWstoreconst)
+               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
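
The final rule above glues two adjacent one-byte constant stores into one two-byte constant store, assembling the new constant little-endian from the two AuxInt payloads. The value arithmetic, extracted into plain Go (ValAndOff internals elided; only the Val combination written in the rule is shown):

	package main

	import "fmt"

	func main() {
		a := int64(0x34) // byte constant stored at ValAndOff(a).Off()
		c := int64(0x12) // byte constant stored at the next offset
		combined := a&0xff | c<<8
		fmt.Printf("%#x\n", combined) // 0x1234: the MOVWstoreconst value
	}
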
-func rewriteValue386_OpInterCall(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBstoreconstidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (InterCall [argwid] entry mem)
+       // match: (MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
        // cond:
-       // result: (CALLinter [argwid] entry mem)
+       // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               mem := v.Args[1]
-               v.reset(Op386CALLinter)
-               v.AuxInt = argwid
-               v.AddArg(entry)
+               x := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(Op386MOVBstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpIsInBounds(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (IsInBounds idx len)
+       // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
        // cond:
-       // result: (SETB (CMPL idx len))
+       // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               idx := v.Args[0]
-               len := v.Args[1]
-               v.reset(Op386SETB)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(idx)
-               v0.AddArg(len)
-               v.AddArg(v0)
+               x := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVBstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpIsNonNil(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (IsNonNil p)
-       // cond:
-       // result: (SETNE (TESTL p p))
+       // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
        for {
+               c := v.AuxInt
+               s := v.Aux
                p := v.Args[0]
-               v.reset(Op386SETNE)
-               v0 := b.NewValue0(v.Line, Op386TESTL, TypeFlags)
-               v0.AddArg(p)
-               v0.AddArg(p)
-               v.AddArg(v0)
+               i := v.Args[1]
+               x := v.Args[2]
+               if x.Op != Op386MOVBstoreconstidx1 {
+                       break
+               }
+               a := x.AuxInt
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if i != x.Args[1] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVWstoreconstidx1)
+               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(i)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValue386_OpIsSliceInBounds(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVBstoreidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (IsSliceInBounds idx len)
+       // match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
        // cond:
-       // result: (SETBE (CMPL idx len))
-       for {
-               idx := v.Args[0]
-               len := v.Args[1]
-               v.reset(Op386SETBE)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(idx)
-               v0.AddArg(len)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValue386_Op386LEAL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (LEAL [c] {s} (ADDLconst [d] x))
-       // cond: is32Bit(c+d)
-       // result: (LEAL [c+d] {s} x)
+       // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
                c := v.AuxInt
-               s := v.Aux
+               sym := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != Op386ADDLconst {
                        break
                }
                d := v_0.AuxInt
-               x := v_0.Args[0]
-               if !(is32Bit(c + d)) {
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVBstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+       // cond:
+       // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+       for {
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
                        break
                }
-               v.reset(Op386LEAL)
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVBstoreidx1)
                v.AuxInt = c + d
-               v.Aux = s
-               v.AddArg(x)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL [c] {s} (ADDL x y))
-       // cond: x.Op != OpSB && y.Op != OpSB
-       // result: (LEAL1 [c] {s} x y)
+       // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
        for {
-               c := v.AuxInt
+               i := v.AuxInt
                s := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDL {
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != Op386SHRLconst {
                        break
                }
-               x := v_0.Args[0]
-               y := v_0.Args[1]
-               if !(x.Op != OpSB && y.Op != OpSB) {
+               if v_2.AuxInt != 8 {
                        break
                }
-               v.reset(Op386LEAL1)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-       // match: (LEAL [off1] {sym1} (LEAL [off2] {sym2} x))
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != Op386MOVBstoreidx1 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               x := v_0.Args[0]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if x.AuxInt != i-1 {
                        break
                }
-               v.reset(Op386LEAL)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               return true
-       }
-       // match: (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y))
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL1 {
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if idx != x.Args[1] {
+                       break
+               }
+               if w != x.Args[2] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVWstoreidx1)
+               v.AuxInt = i - 1
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(idx)
+               v.AddArg(w)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
+       for {
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != Op386SHRLconst {
+                       break
+               }
+               j := v_2.AuxInt
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != Op386MOVBstoreidx1 {
+                       break
+               }
+               if x.AuxInt != i-1 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if idx != x.Args[1] {
+                       break
+               }
+               w0 := x.Args[2]
+               if w0.Op != Op386SHRLconst {
+                       break
+               }
+               if w0.AuxInt != j-8 {
+                       break
+               }
+               if w != w0.Args[0] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVWstoreidx1)
+               v.AuxInt = i - 1
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(idx)
+               v.AddArg(w0)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValue386_Op386MOVLload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVLload  [off1] {sym} (ADDLconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVLload  [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
                        break
                }
                off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v_0.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
                        break
                }
-               v.reset(Op386LEAL1)
+               v.reset(Op386MOVLload)
                v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y))
+       // match: (MOVLload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
        // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // result: (MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL2 {
+               if v_0.Op != Op386LEAL {
                        break
                }
                off2 := v_0.AuxInt
                sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v_0.Args[1]
+               base := v_0.Args[0]
+               mem := v.Args[1]
                if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386LEAL2)
+               v.reset(Op386MOVLload)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(base)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y))
+       // match: (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
        // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL4 {
+               if v_0.Op != Op386LEAL1 {
                        break
                }
                off2 := v_0.AuxInt
                sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v_0.Args[1]
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
                if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386LEAL4)
+               v.reset(Op386MOVLloadidx1)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y))
+       // match: (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem)
        // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL8 {
+               if v_0.Op != Op386LEAL4 {
                        break
                }
                off2 := v_0.AuxInt
                sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v_0.Args[1]
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
                if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386LEAL8)
+               v.reset(Op386MOVLloadidx4)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386LEAL1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
-       // cond: is32Bit(c+d)   && x.Op != OpSB
-       // result: (LEAL1 [c+d] {s} x y)
+       // match: (MOVLload [off] {sym} (ADDL ptr idx) mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVLloadidx1 [off] {sym} ptr idx mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
+               off := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v_0.Op != Op386ADDL {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(c+d) && x.Op != OpSB) {
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(ptr.Op != OpSB) {
                        break
                }
-               v.reset(Op386LEAL1)
-               v.AuxInt = c + d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(Op386MOVLloadidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL1 [c] {s} x (ADDLconst [d] y))
-       // cond: is32Bit(c+d)   && y.Op != OpSB
-       // result: (LEAL1 [c+d] {s} x y)
+       return false
+}
+func rewriteValue386_Op386MOVLloadidx1(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem)
+       // cond:
+       // result: (MOVLloadidx4 [c] {sym} ptr idx mem)
        for {
                c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
+               sym := v.Aux
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if v_1.Op != Op386SHLLconst {
                        break
                }
-               d := v_1.AuxInt
-               y := v_1.Args[0]
-               if !(is32Bit(c+d) && y.Op != OpSB) {
+               if v_1.AuxInt != 2 {
                        break
                }
-               v.reset(Op386LEAL1)
-               v.AuxInt = c + d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVLloadidx4)
+               v.AuxInt = c
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL1 [c] {s} x (SHLLconst [1] y))
+       // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
        // cond:
-       // result: (LEAL2 [c] {s} x y)
+       // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
        for {
                c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386SHLLconst {
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
                        break
                }
-               if v_1.AuxInt != 1 {
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(Op386MOVLloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
+       // cond:
+       // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+       for {
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(Op386LEAL2)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVLloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL1 [c] {s} (SHLLconst [1] x) y)
+       return false
+}
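
The first rule above rewrites an explicit shift of the index into the idx4 addressing form: shifting left by 2 is the same as scaling by 4, so the SHLLconst disappears into the addressing mode. In plain Go terms (illustrative values):

	package main

	import "fmt"

	func main() {
		ptr, idx, c := int64(0x2000), int64(7), int64(12)
		viaShift := ptr + (idx << 2) + c // MOVLloadidx1 with (SHLLconst [2] idx)
		viaScale := ptr + 4*idx + c      // MOVLloadidx4 with the bare idx
		fmt.Println(viaShift == viaScale) // true
	}
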
+func rewriteValue386_Op386MOVLloadidx4(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
        // cond:
-       // result: (LEAL2 [c] {s} y x)
+       // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
        for {
                c := v.AuxInt
-               s := v.Aux
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386SHLLconst {
-                       break
-               }
-               if v_0.AuxInt != 1 {
+               if v_0.Op != Op386ADDLconst {
                        break
                }
-               x := v_0.Args[0]
-               y := v.Args[1]
-               v.reset(Op386LEAL2)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(y)
-               v.AddArg(x)
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(Op386MOVLloadidx4)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
+       // match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
        // cond:
-       // result: (LEAL4 [c] {s} x y)
+       // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
        for {
                c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
+               sym := v.Aux
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386SHLLconst {
-                       break
-               }
-               if v_1.AuxInt != 2 {
+               if v_1.Op != Op386ADDLconst {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(Op386LEAL4)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVLloadidx4)
+               v.AuxInt = c + 4*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL1 [c] {s} (SHLLconst [2] x) y)
-       // cond:
-       // result: (LEAL4 [c] {s} y x)
+       return false
+}
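
Note the asymmetry in the two rules above: a constant added to the pointer folds as c+d, but a constant added to the index folds as c+4*d, because the index is scaled by 4 before it enters the address. A worked check (standalone Go, illustrative values):

	package main

	import "fmt"

	func main() {
		ptr, idx, c, d := int64(0x3000), int64(5), int64(8), int64(3)
		before := ptr + 4*(idx+d) + c    // index carries the ADDLconst [d]
		after := ptr + 4*idx + (c + 4*d) // result form: AuxInt absorbs 4*d
		fmt.Println(before == after)     // true
	}
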
+func rewriteValue386_Op386MOVLstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVLstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVLstore  [off1+off2] {sym} ptr val mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
+               off1 := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386SHLLconst {
+               if v_0.Op != Op386ADDLconst {
                        break
                }
-               if v_0.AuxInt != 2 {
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
                        break
                }
-               x := v_0.Args[0]
-               y := v.Args[1]
-               v.reset(Op386LEAL4)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(y)
-               v.AddArg(x)
+               v.reset(Op386MOVLstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
-       // cond:
-       // result: (LEAL8 [c] {s} x y)
+       // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
+       // cond: validOff(off)
+       // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386SHLLconst {
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               if v_1.AuxInt != 3 {
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               if !(validOff(off)) {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(Op386LEAL8)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(Op386MOVLstoreconst)
+               v.AuxInt = makeValAndOff(int64(int32(c)), off)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL1 [c] {s} (SHLLconst [3] x) y)
-       // cond:
-       // result: (LEAL8 [c] {s} y x)
+       // match: (MOVLstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386SHLLconst {
+               if v_0.Op != Op386LEAL {
                        break
                }
-               if v_0.AuxInt != 3 {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               x := v_0.Args[0]
-               y := v.Args[1]
-               v.reset(Op386LEAL8)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(y)
-               v.AddArg(x)
+               v.reset(Op386MOVLstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-       // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // match: (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
+               if v_0.Op != Op386LEAL1 {
                        break
                }
                off2 := v_0.AuxInt
                sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386LEAL1)
+               v.reset(Op386MOVLstoreidx1)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL1 [off1] {sym1} x (LEAL [off2] {sym2} y))
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
-       // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // match: (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386LEAL {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL4 {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               y := v_1.Args[0]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386LEAL1)
+               v.reset(Op386MOVLstoreidx4)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386LEAL2(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
-       // cond: is32Bit(c+d)   && x.Op != OpSB
-       // result: (LEAL2 [c+d] {s} x y)
+       // match: (MOVLstore [off] {sym} (ADDL ptr idx) val mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
+               off := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(c+d) && x.Op != OpSB) {
-                       break
-               }
-               v.reset(Op386LEAL2)
-               v.AuxInt = c + d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-       // match: (LEAL2 [c] {s} x (ADDLconst [d] y))
-       // cond: is32Bit(c+2*d) && y.Op != OpSB
-       // result: (LEAL2 [c+2*d] {s} x y)
-       for {
-               c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if v_0.Op != Op386ADDL {
                        break
                }
-               d := v_1.AuxInt
-               y := v_1.Args[0]
-               if !(is32Bit(c+2*d) && y.Op != OpSB) {
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(ptr.Op != OpSB) {
                        break
                }
-               v.reset(Op386LEAL2)
-               v.AuxInt = c + 2*d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(Op386MOVLstoreidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL2 [c] {s} x (SHLLconst [1] y))
-       // cond:
-       // result: (LEAL4 [c] {s} x y)
+       return false
+}
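
// Every matcher in these generated functions follows one shape: destructure
// v.Args, check opcodes and AuxInt/Aux, test the cond, then rewrite v in
// place via reset/AddArg. A minimal standalone sketch of that shape, with
// toy Value/Op types standing in for the real ssa package (names here are
// illustrative, not the compiler's own):

package main

import "fmt"

type Op string

type Value struct {
	Op     Op
	AuxInt int64
	Args   []*Value
}

func (v *Value) reset(op Op) {
	v.Op = op
	v.AuxInt = 0
	v.Args = nil
}

func (v *Value) AddArg(a *Value) { v.Args = append(v.Args, a) }

// rewriteStore folds (Store [off1] (AddConst [off2] ptr) val) into
// (Store [off1+off2] ptr val), mirroring the MOVLstore/ADDLconst rule.
func rewriteStore(v *Value) bool {
	if v.Op != "Store" {
		return false
	}
	off1 := v.AuxInt
	v_0 := v.Args[0]
	if v_0.Op != "AddConst" {
		return false
	}
	off2 := v_0.AuxInt
	ptr := v_0.Args[0]
	val := v.Args[1]
	v.reset("Store")
	v.AuxInt = off1 + off2
	v.AddArg(ptr)
	v.AddArg(val)
	return true
}

func main() {
	ptr := &Value{Op: "Ptr"}
	val := &Value{Op: "Const", AuxInt: 7}
	add := &Value{Op: "AddConst", AuxInt: 8, Args: []*Value{ptr}}
	st := &Value{Op: "Store", AuxInt: 4, Args: []*Value{add, val}}
	fmt.Println(rewriteStore(st), st.Op, st.AuxInt) // true Store 12
}
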
+func rewriteValue386_Op386MOVLstoreconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+       // cond: ValAndOff(sc).canAdd(off)
+       // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
        for {
-               c := v.AuxInt
+               sc := v.AuxInt
                s := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386SHLLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
                        break
                }
-               if v_1.AuxInt != 1 {
+               off := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(ValAndOff(sc).canAdd(off)) {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(Op386LEAL4)
-               v.AuxInt = c
+               v.reset(Op386MOVLstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
                v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL2 [c] {s} x (SHLLconst [2] y))
-       // cond:
-       // result: (LEAL8 [c] {s} x y)
+       // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+       // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386SHLLconst {
+               sc := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
                        break
                }
-               if v_1.AuxInt != 2 {
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(Op386LEAL8)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(Op386MOVLstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-       // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // match: (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
+       // cond: canMergeSym(sym1, sym2)
+       // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               off1 := v.AuxInt
+               x := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
+               if v_0.Op != Op386LEAL1 {
                        break
                }
-               off2 := v_0.AuxInt
+               off := v_0.AuxInt
                sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386LEAL2)
-               v.AuxInt = off1 + off2
+               v.reset(Op386MOVLstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(off)
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386LEAL4(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
-       // cond: is32Bit(c+d)   && x.Op != OpSB
-       // result: (LEAL4 [c+d] {s} x y)
+       // match: (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem)
+       // cond: canMergeSym(sym1, sym2)
+       // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
+               x := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v_0.Op != Op386LEAL4 {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(c+d) && x.Op != OpSB) {
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386LEAL4)
-               v.AuxInt = c + d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(Op386MOVLstoreconstidx4)
+               v.AuxInt = ValAndOff(x).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL4 [c] {s} x (ADDLconst [d] y))
-       // cond: is32Bit(c+4*d) && y.Op != OpSB
-       // result: (LEAL4 [c+4*d] {s} x y)
+       // match: (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem)
+       // cond:
+       // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
-                       break
-               }
-               d := v_1.AuxInt
-               y := v_1.Args[0]
-               if !(is32Bit(c+4*d) && y.Op != OpSB) {
+               x := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDL {
                        break
                }
-               v.reset(Op386LEAL4)
-               v.AuxInt = c + 4*d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               v.reset(Op386MOVLstoreconstidx1)
+               v.AuxInt = x
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL4 [c] {s} x (SHLLconst [1] y))
+       return false
+}
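
// MOVLstoreconst carries two constants in a single AuxInt: the value to
// store and the address offset, packed by ValAndOff. A self-contained
// sketch of that packing (the real type lives in the ssa package; this
// assumes the same layout, value in the high 32 bits, offset in the low 32):

package main

import "fmt"

type ValAndOff int64

func makeValAndOff(val, off int64) ValAndOff {
	return ValAndOff(val<<32 + int64(uint32(off)))
}

func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
func (x ValAndOff) Off() int64 { return int64(int32(x)) }

// canAdd reports whether off can be folded into x without
// overflowing the 32-bit offset field; this is the cond the
// MOVLstoreconst rules test before merging an ADDLconst.
func (x ValAndOff) canAdd(off int64) bool {
	newoff := x.Off() + off
	return newoff == int64(int32(newoff))
}

func (x ValAndOff) add(off int64) int64 {
	return int64(makeValAndOff(x.Val(), x.Off()+off))
}

func main() {
	x := makeValAndOff(42, 8)
	fmt.Println(x.Val(), x.Off()) // 42 8
	fmt.Println(x.canAdd(100))    // true
	y := ValAndOff(x.add(100))
	fmt.Println(y.Val(), y.Off()) // 42 108
}
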
+func rewriteValue386_Op386MOVLstoreconstidx1(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem)
        // cond:
-       // result: (LEAL8 [c] {s} x y)
+       // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
        for {
                c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
+               sym := v.Aux
+               ptr := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != Op386SHLLconst {
                        break
                }
-               if v_1.AuxInt != 1 {
+               if v_1.AuxInt != 2 {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(Op386LEAL8)
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVLstoreconstidx4)
                v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-       // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // match: (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
+       // cond:
+       // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               x := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
+               if v_0.Op != Op386ADDLconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(Op386MOVLstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
+       // cond:
+       // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       for {
+               x := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
                        break
                }
-               v.reset(Op386LEAL4)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVLstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
        return false
 }
-func rewriteValue386_Op386LEAL8(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVLstoreconstidx4(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
-       // cond: is32Bit(c+d)   && x.Op != OpSB
-       // result: (LEAL8 [c+d] {s} x y)
+       // match: (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem)
+       // cond:
+       // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
+               x := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != Op386ADDLconst {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(c+d) && x.Op != OpSB) {
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(Op386MOVLstoreconstidx4)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem)
+       // cond:
+       // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
+       for {
+               x := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
                        break
                }
-               v.reset(Op386LEAL8)
-               v.AuxInt = c + d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVLstoreconstidx4)
+               v.AuxInt = ValAndOff(x).add(4 * c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL8 [c] {s} x (ADDLconst [d] y))
-       // cond: is32Bit(c+8*d) && y.Op != OpSB
-       // result: (LEAL8 [c+8*d] {s} x y)
+       return false
+}
+func rewriteValue386_Op386MOVLstoreidx1(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem)
+       // cond:
+       // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
        for {
                c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
+               sym := v.Aux
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if v_1.Op != Op386SHLLconst {
                        break
                }
-               d := v_1.AuxInt
-               y := v_1.Args[0]
-               if !(is32Bit(c+8*d) && y.Op != OpSB) {
+               if v_1.AuxInt != 2 {
                        break
                }
-               v.reset(Op386LEAL8)
-               v.AuxInt = c + 8*d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVLstoreidx4)
+               v.AuxInt = c
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-       // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+       // cond:
+       // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               c := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
+               if v_0.Op != Op386ADDLconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVLstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+       // cond:
+       // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+       for {
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
                        break
                }
-               v.reset(Op386LEAL8)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVLstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
        return false
 }
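
// The first MOVLstoreidx1 rule above promotes a shifted index to a scaled
// one: ptr + (idx<<2) + c is the same address as ptr + 4*idx + c, and the
// 386 addressing modes encode the *4 scale for free. A quick check of the
// identity:

package main

import "fmt"

func main() {
	const ptr, idx, c = 0x1000, 5, 12
	addr1 := ptr + (idx << 2) + c // what MOVLstoreidx1 of a SHLLconst computes
	addr4 := ptr + 4*idx + c      // what MOVLstoreidx4 computes
	fmt.Println(addr1 == addr4, addr1) // true 4128
}
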
-func rewriteValue386_OpLeq16(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVLstoreidx4(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq16  x y)
+       // match: (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
        // cond:
-       // result: (SETLE (CMPW x y))
+       // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETLE)
-               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVLstoreidx4)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpLeq16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq16U x y)
+       // match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
        // cond:
-       // result: (SETBE (CMPW x y))
+       // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETBE)
-               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVLstoreidx4)
+               v.AuxInt = c + 4*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValue386_OpLeq32(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSDload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq32  x y)
-       // cond:
-       // result: (SETLE (CMPL x y))
+       // match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVSDload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETLE)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(Op386MOVSDload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpLeq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq32F x y)
-       // cond:
-       // result: (SETGEF (UCOMISS y x))
+       // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETGEF)
-               v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVSDload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpLeq32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq32U x y)
-       // cond:
-       // result: (SETBE (CMPL x y))
+       // match: (MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETBE)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL1 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVSDloadidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpLeq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq64F x y)
-       // cond:
-       // result: (SETGEF (UCOMISD y x))
+       // match: (MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETGEF)
-               v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL8 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVSDloadidx8)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpLeq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq8   x y)
-       // cond:
-       // result: (SETLE (CMPB x y))
+       // match: (MOVSDload [off] {sym} (ADDL ptr idx) mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETLE)
-               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDL {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386MOVSDloadidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
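
// Folding an ADDLconst into a load's offset is only legal while off1+off2
// still fits in the 32-bit displacement field, hence the is32Bit cond on
// these rules. A sketch of that guard, the same shape as the helper the
// generated code calls:

package main

import "fmt"

// is32Bit reports whether n fits in a signed 32-bit displacement.
func is32Bit(n int64) bool {
	return n == int64(int32(n))
}

func main() {
	fmt.Println(is32Bit(1<<31 - 1)) // true: largest representable displacement
	fmt.Println(is32Bit(1 << 31))   // false: would overflow, so the rule breaks
}
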
-func rewriteValue386_OpLeq8U(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSDloadidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq8U  x y)
+       // match: (MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
        // cond:
-       // result: (SETBE (CMPB x y))
+       // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETBE)
-               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(Op386MOVSDloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpLess16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less16  x y)
+       // match: (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
        // cond:
-       // result: (SETL (CMPW x y))
+       // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETL)
-               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVSDloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValue386_OpLess16U(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSDloadidx8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less16U x y)
+       // match: (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem)
        // cond:
-       // result: (SETB (CMPW x y))
+       // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETB)
-               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValue386_OpLess32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less32  x y)
-       // cond:
-       // result: (SETL (CMPL x y))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETL)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValue386_OpLess32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less32F x y)
-       // cond:
-       // result: (SETGF (UCOMISS y x))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETGF)
-               v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValue386_OpLess32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less32U x y)
-       // cond:
-       // result: (SETB (CMPL x y))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETB)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValue386_OpLess64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less64F x y)
-       // cond:
-       // result: (SETGF (UCOMISD y x))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETGF)
-               v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValue386_OpLess8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less8   x y)
-       // cond:
-       // result: (SETL (CMPB x y))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETL)
-               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(Op386MOVSDloadidx8)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpLess8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less8U  x y)
+       // match: (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem)
        // cond:
-       // result: (SETB (CMPB x y))
+       // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETB)
-               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVSDloadidx8)
+               v.AuxInt = c + 8*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
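
// When the ADDLconst sits on the index of an 8-scaled access, its
// contribution to the displacement is multiplied by the scale:
// ptr + 8*(idx+d) + c == ptr + 8*idx + (c + 8*d), which is why the
// second MOVSDloadidx8 rule rewrites the offset to c+8*d. Checking
// the algebra:

package main

import "fmt"

func main() {
	const ptr, idx, c, d = 0x2000, 3, 16, 2
	before := ptr + 8*(idx+d) + c      // index carries the constant
	after := ptr + 8*idx + (c + 8*d)   // constant folded into the offset
	fmt.Println(before == after, before) // true 8248
}
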
-func rewriteValue386_OpLoad(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSDstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Load <t> ptr mem)
-       // cond: (is32BitInt(t) || isPtr(t))
-       // result: (MOVLload ptr mem)
+       // match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitInt(t) || isPtr(t)) {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
                        break
                }
-               v.reset(Op386MOVLload)
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(Op386MOVSDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is16BitInt(t)
-       // result: (MOVWload ptr mem)
+       // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is16BitInt(t)) {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
                        break
                }
-               v.reset(Op386MOVWload)
-               v.AddArg(ptr)
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVSDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (t.IsBoolean() || is8BitInt(t))
-       // result: (MOVBload ptr mem)
+       // match: (MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(t.IsBoolean() || is8BitInt(t)) {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL1 {
                        break
                }
-               v.reset(Op386MOVBload)
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVSDstoreidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is32BitFloat(t)
-       // result: (MOVSSload ptr mem)
+       // match: (MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitFloat(t)) {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL8 {
                        break
                }
-               v.reset(Op386MOVSSload)
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVSDstoreidx8)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is64BitFloat(t)
-       // result: (MOVSDload ptr mem)
+       // match: (MOVSDstore [off] {sym} (ADDL ptr idx) val mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is64BitFloat(t)) {
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDL {
                        break
                }
-               v.reset(Op386MOVSDload)
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386MOVSDstoreidx1)
+               v.AuxInt = off
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
        return false
 }
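
// The generic Load above lowers by inspecting the value's type: 32-bit
// ints and pointers take MOVLload, 16-bit ints MOVWload, bools and 8-bit
// ints MOVBload, and floats the SSE loads. A table-style sketch of that
// dispatch, using a toy type descriptor rather than the compiler's Type:

package main

import "fmt"

type Type struct {
	Size  int64
	Float bool
	Bool  bool
	Ptr   bool
}

func loadOp(t Type) string {
	switch {
	case t.Float && t.Size == 8:
		return "MOVSDload"
	case t.Float && t.Size == 4:
		return "MOVSSload"
	case t.Size == 4 || t.Ptr: // pointers are 4 bytes on 386
		return "MOVLload"
	case t.Size == 2:
		return "MOVWload"
	case t.Bool || t.Size == 1:
		return "MOVBload"
	}
	return "unhandled"
}

func main() {
	fmt.Println(loadOp(Type{Size: 4}))              // MOVLload
	fmt.Println(loadOp(Type{Size: 8, Float: true})) // MOVSDload
	fmt.Println(loadOp(Type{Size: 1, Bool: true}))  // MOVBload
}
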
-func rewriteValue386_OpLrot16(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSDstoreidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lrot16 <t> x [c])
+       // match: (MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
        // cond:
-       // result: (ROLWconst <t> [c&15] x)
+       // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
                c := v.AuxInt
-               v.reset(Op386ROLWconst)
-               v.Type = t
-               v.AuxInt = c & 15
-               v.AddArg(x)
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVSDstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpLrot32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lrot32 <t> x [c])
+       // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
        // cond:
-       // result: (ROLLconst <t> [c&31] x)
+       // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
                c := v.AuxInt
-               v.reset(Op386ROLLconst)
-               v.Type = t
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVSDstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValue386_OpLrot8(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSDstoreidx8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lrot8  <t> x [c])
+       // match: (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem)
        // cond:
-       // result: (ROLBconst <t> [c&7] x)
+       // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
                c := v.AuxInt
-               v.reset(Op386ROLBconst)
-               v.Type = t
-               v.AuxInt = c & 7
-               v.AddArg(x)
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVSDstoreidx8)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpLsh16x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh16x16 <t> x y)
+       // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem)
        // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+       // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVSDstoreidx8)
+               v.AuxInt = c + 8*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
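
// The Lsh rules above implement Go's shift semantics on top of SHLL,
// which only looks at the low 5 bits of the count: the shifted value is
// ANDed with a mask that SBBLcarrymask makes all-ones when y < 32 and
// all-zeros otherwise, so oversized shifts come out 0 instead of wrapping.
// The same trick, spelled out:

package main

import "fmt"

// lsh32 mimics (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))).
func lsh32(x, y uint32) uint32 {
	shifted := x << (y & 31) // hardware SHLL: count taken mod 32
	var mask uint32
	if y < 32 { // CMP sets the carry for y < 32; SBB turns it into 0 or ^0
		mask = ^uint32(0)
	}
	return shifted & mask
}

func main() {
	fmt.Println(lsh32(1, 4))  // 16
	fmt.Println(lsh32(1, 33)) // 0, not 2: the mask zeroes oversized shifts
}
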
-func rewriteValue386_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSSload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x32 <t> x y)
-       // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+       // match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVSSload [off1+off2] {sym} ptr mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(Op386MOVSSload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpLsh16x64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh16x64 x (Const64 [c]))
-       // cond: uint64(c) < 16
-       // result: (SHLLconst x [c])
+       // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
                        break
                }
-               c := v_1.AuxInt
-               if !(uint64(c) < 16) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386SHLLconst)
-               v.AddArg(x)
-               v.AuxInt = c
+               v.reset(Op386MOVSSload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(mem)
                return true
        }
-       // match: (Lsh16x64 _ (Const64 [c]))
-       // cond: uint64(c) >= 16
-       // result: (Const16 [0])
+       // match: (MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL1 {
                        break
                }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpConst16)
-               v.AuxInt = 0
+               v.reset(Op386MOVSSloadidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValue386_OpLsh16x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh16x8  <t> x y)
-       // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+       // match: (MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
-               return true
-       }
-}
-func rewriteValue386_OpLsh32x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x16 <t> x y)
-       // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-       for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
-               return true
-       }
-}
-func rewriteValue386_OpLsh32x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x32 <t> x y)
-       // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-       for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
-               return true
-       }
-}
-func rewriteValue386_OpLsh32x64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x64 x (Const64 [c]))
-       // cond: uint64(c) < 32
-       // result: (SHLLconst x [c])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL4 {
                        break
                }
-               c := v_1.AuxInt
-               if !(uint64(c) < 32) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386SHLLconst)
-               v.AddArg(x)
-               v.AuxInt = c
+               v.reset(Op386MOVSSloadidx4)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (Lsh32x64 _ (Const64 [c]))
-       // cond: uint64(c) >= 32
-       // result: (Const32 [0])
+       // match: (MOVSSload [off] {sym} (ADDL ptr idx) mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDL {
                        break
                }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(ptr.Op != OpSB) {
                        break
                }
-               v.reset(OpConst32)
-               v.AuxInt = 0
+               v.reset(Op386MOVSSloadidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
        return false
 }
-func rewriteValue386_OpLsh32x8(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSSloadidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh32x8  <t> x y)
+       // match: (MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
        // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+       // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(Op386MOVSSloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpLsh8x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh8x16 <t> x y)
+       // match: (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
        // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+       // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVSSloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValue386_OpLsh8x32(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSSloadidx4(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x32 <t> x y)
+       // match: (MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
        // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+       // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(Op386MOVSSloadidx4)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValue386_OpLsh8x64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh8x64 x (Const64 [c]))
-       // cond: uint64(c) < 8
-       // result: (SHLLconst x [c])
+       // match: (MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
+       // cond:
+       // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 8) {
+               if v_1.Op != Op386ADDLconst {
                        break
                }
-               v.reset(Op386SHLLconst)
-               v.AddArg(x)
-               v.AuxInt = c
-               return true
-       }
-       // match: (Lsh8x64 _ (Const64 [c]))
-       // cond: uint64(c) >= 8
-       // result: (Const8 [0])
-       for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
-                       break
-               }
-               v.reset(OpConst8)
-               v.AuxInt = 0
-               return true
-       }
-       return false
-}
-func rewriteValue386_OpLsh8x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh8x8  <t> x y)
-       // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-       for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
-               return true
-       }
-}
-func rewriteValue386_Op386MOVBLSX(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBLSX x:(MOVBload [off] {sym} ptr mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
-       for {
-               x := v.Args[0]
-               if x.Op != Op386MOVBload {
-                       break
-               }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               mem := x.Args[1]
-               if !(x.Uses == 1 && clobber(x)) {
-                       break
-               }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, Op386MOVBLSXload, v.Type)
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
-               v0.AddArg(mem)
-               return true
-       }
-       // match: (MOVBLSX (ANDLconst [c] x))
-       // cond: c & 0x80 == 0
-       // result: (ANDLconst [c & 0x7f] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ANDLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               if !(c&0x80 == 0) {
-                       break
-               }
-               v.reset(Op386ANDLconst)
-               v.AuxInt = c & 0x7f
-               v.AddArg(x)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386MOVBLSXload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(Op386MOVBLSXload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVSSloadidx4)
+               v.AuxInt = c + 4*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
        return false
 }
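
Note the asymmetry in the two MOVSSloadidx4 rules above: a constant folded out of the base contributes d to the displacement, but the same constant folded out of the index contributes 4*d, because the index is scaled by the four-byte element size. The arithmetic identity being relied on, checked concretely:

	package main

	import "fmt"

	func main() {
		const base, idx, c, d = 1000, 7, 16, 3
		// (MOVSSloadidx4 [c] (ADDLconst [d] ptr) idx): (base+d) + 4*idx + c
		// (MOVSSloadidx4 [c] ptr (ADDLconst [d] idx)): base + 4*(idx+d) + c
		// fold into [c+d] and [c+4*d] respectively:
		fmt.Println((base+d)+4*idx+c == base+4*idx+(c+d))   // true
		fmt.Println(base+4*(idx+d)+c == base+4*idx+(c+4*d)) // true
	}
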
-func rewriteValue386_Op386MOVBLZX(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBLZX x:(MOVBload [off] {sym} ptr mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
-       for {
-               x := v.Args[0]
-               if x.Op != Op386MOVBload {
-                       break
-               }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               mem := x.Args[1]
-               if !(x.Uses == 1 && clobber(x)) {
-                       break
-               }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, Op386MOVBload, v.Type)
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
-               v0.AddArg(mem)
-               return true
-       }
-       // match: (MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
-       for {
-               x := v.Args[0]
-               if x.Op != Op386MOVBloadidx1 {
-                       break
-               }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               idx := x.Args[1]
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
-                       break
-               }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, Op386MOVBloadidx1, v.Type)
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
-               v0.AddArg(idx)
-               v0.AddArg(mem)
-               return true
-       }
-       // match: (MOVBLZX (ANDLconst [c] x))
-       // cond:
-       // result: (ANDLconst [c & 0xff] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ANDLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(Op386ANDLconst)
-               v.AuxInt = c & 0xff
-               v.AddArg(x)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386MOVBload(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSSstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-       // result: x
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVBstore {
-                       break
-               }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (MOVBload  [off1] {sym} (ADDLconst [off2] ptr) mem)
+       // match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
        // cond: is32Bit(off1+off2)
-       // result: (MOVBload  [off1+off2] {sym} ptr mem)
+       // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
        for {
                off1 := v.AuxInt
                sym := v.Aux
@@ -4552,20 +4924,22 @@ func rewriteValue386_Op386MOVBload(v *Value, config *Config) bool {
                }
                off2 := v_0.AuxInt
                ptr := v_0.Args[0]
-               mem := v.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
                if !(is32Bit(off1 + off2)) {
                        break
                }
-               v.reset(Op386MOVBload)
+               v.reset(Op386MOVSSstore)
                v.AuxInt = off1 + off2
                v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+       // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
        // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
+       // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
@@ -4576,20 +4950,22 @@ func rewriteValue386_Op386MOVBload(v *Value, config *Config) bool {
                off2 := v_0.AuxInt
                sym2 := v_0.Aux
                base := v_0.Args[0]
-               mem := v.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
                if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386MOVBload)
+               v.reset(Op386MOVSSstore)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
                v.AddArg(base)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
+       // match: (MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
        // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
@@ -4601,50 +4977,82 @@ func rewriteValue386_Op386MOVBload(v *Value, config *Config) bool {
                sym2 := v_0.Aux
                ptr := v_0.Args[0]
                idx := v_0.Args[1]
-               mem := v.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
                if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386MOVBloadidx1)
+               v.reset(Op386MOVSSstoreidx1)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
                v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBload [off] {sym} (ADDL ptr idx) mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVBloadidx1 [off] {sym} ptr idx mem)
+       // match: (MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               off := v.AuxInt
-               sym := v.Aux
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL4 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVSSstoreidx4)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVSSstore [off] {sym} (ADDL ptr idx) val mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != Op386ADDL {
                        break
                }
                ptr := v_0.Args[0]
                idx := v_0.Args[1]
-               mem := v.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
                if !(ptr.Op != OpSB) {
                        break
                }
-               v.reset(Op386MOVBloadidx1)
+               v.reset(Op386MOVSSstoreidx1)
                v.AuxInt = off
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
        return false
 }
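
The LEAL-folding rules for MOVSSstore reuse the same symbol bookkeeping as the loads: displacements always add, but an address can carry at most one symbol, hence the canMergeSym condition. A paraphrase of what those helpers amount to (the real ones live in cmd/compile/internal/ssa/rewrite.go; this sketch is not the source):

	package main

	import "fmt"

	// Hypothetical paraphrases of canMergeSym/mergeSym: merging is legal
	// only when at least one side has no symbol, and the merge keeps
	// whichever symbol is present.
	func canMergeSym(x, y interface{}) bool { return x == nil || y == nil }

	func mergeSym(x, y interface{}) interface{} {
		if x == nil {
			return y
		}
		return x // y is nil whenever canMergeSym held
	}

	func main() {
		var sym interface{} = "mypkg.someGlobal" // illustrative symbol name
		fmt.Println(canMergeSym(sym, nil), mergeSym(sym, nil)) // true mypkg.someGlobal
		fmt.Println(canMergeSym(sym, sym))                     // false: two symbols cannot merge
	}
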
-func rewriteValue386_Op386MOVBloadidx1(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSSstoreidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
+       // match: (MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
        // cond:
-       // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
                c := v.AuxInt
                sym := v.Aux
@@ -4655,18 +5063,20 @@ func rewriteValue386_Op386MOVBloadidx1(v *Value, config *Config) bool {
                d := v_0.AuxInt
                ptr := v_0.Args[0]
                idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(Op386MOVBloadidx1)
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVSSstoreidx1)
                v.AuxInt = c + d
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
+       // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
        // cond:
-       // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
                c := v.AuxInt
                sym := v.Aux
@@ -4677,113 +5087,126 @@ func rewriteValue386_Op386MOVBloadidx1(v *Value, config *Config) bool {
                }
                d := v_1.AuxInt
                idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVBloadidx1)
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVSSstoreidx1)
                v.AuxInt = c + d
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
        return false
 }
-func rewriteValue386_Op386MOVBstore(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVSSstoreidx4(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem)
+       // match: (MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
        // cond:
-       // result: (MOVBstore [off] {sym} ptr x mem)
+       // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
        for {
-               off := v.AuxInt
+               c := v.AuxInt
                sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVBLSX {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVBstore)
-               v.AuxInt = off
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVSSstoreidx4)
+               v.AuxInt = c + d
                v.Aux = sym
                v.AddArg(ptr)
-               v.AddArg(x)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem)
+       // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
        // cond:
-       // result: (MOVBstore [off] {sym} ptr x mem)
+       // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
        for {
-               off := v.AuxInt
+               c := v.AuxInt
                sym := v.Aux
                ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386MOVBLZX {
+               if v_1.Op != Op386ADDLconst {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVBstore)
-               v.AuxInt = off
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVSSstoreidx4)
+               v.AuxInt = c + 4*d
                v.Aux = sym
                v.AddArg(ptr)
-               v.AddArg(x)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVBstore  [off1+off2] {sym} ptr val mem)
+       return false
+}
+func rewriteValue386_Op386MOVWLSX(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               x := v.Args[0]
+               if x.Op != Op386MOVWload {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1 + off2)) {
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               mem := x.Args[1]
+               if !(x.Uses == 1 && clobber(x)) {
                        break
                }
-               v.reset(Op386MOVBstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               b = x.Block
+               v0 := b.NewValue0(v.Line, Op386MOVWLSXload, v.Type)
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(mem)
                return true
        }
-       // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
-       // cond: validOff(off)
-       // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
+       // match: (MOVWLSX (ANDLconst [c] x))
+       // cond: c & 0x8000 == 0
+       // result: (ANDLconst [c & 0x7fff] x)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ANDLconst {
                        break
                }
-               c := v_1.AuxInt
-               mem := v.Args[2]
-               if !(validOff(off)) {
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(c&0x8000 == 0) {
                        break
                }
-               v.reset(Op386MOVBstoreconst)
-               v.AuxInt = makeValAndOff(int64(int8(c)), off)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(Op386ANDLconst)
+               v.AuxInt = c & 0x7fff
+               v.AddArg(x)
                return true
        }
-       // match: (MOVBstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+       return false
+}
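
The second MOVWLSX rule above deletes a sign extension entirely: if the mask c leaves bit 15 clear, the masked value can never be negative as an int16, so sign extension and zero extension agree and the mask can be narrowed to c & 0x7fff. An exhaustive check over the low 16 input bits (both sides mask away everything higher); the particular c is an arbitrary illustration:

	package main

	import "fmt"

	func main() {
		const c = 0x7abc // any mask with c & 0x8000 == 0
		ok := true
		for x := 0; x < 1<<16; x++ {
			lhs := int32(int16(uint16(x) & c)) // MOVWLSX (ANDLconst [c] x)
			rhs := int32(x) & (c & 0x7fff)     // ANDLconst [c & 0x7fff] x
			if lhs != rhs {
				ok = false
			}
		}
		fmt.Println(ok) // true
	}
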
+func rewriteValue386_Op386MOVWLSXload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
        // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+       // result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
@@ -4794,247 +5217,248 @@ func rewriteValue386_Op386MOVBstore(v *Value, config *Config) bool {
                off2 := v_0.AuxInt
                sym2 := v_0.Aux
                base := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
+               mem := v.Args[1]
                if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386MOVBstore)
+               v.reset(Op386MOVWLSXload)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
                v.AddArg(base)
-               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       return false
+}
+func rewriteValue386_Op386MOVWLZX(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL1 {
+               x := v.Args[0]
+               if x.Op != Op386MOVWload {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               mem := x.Args[1]
+               if !(x.Uses == 1 && clobber(x)) {
                        break
                }
-               v.reset(Op386MOVBstoreidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               b = x.Block
+               v0 := b.NewValue0(v.Line, Op386MOVWload, v.Type)
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(mem)
                return true
        }
-       // match: (MOVBstore [off] {sym} (ADDL ptr idx) val mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
+       // match: (MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDL {
+               x := v.Args[0]
+               if x.Op != Op386MOVWloadidx1 {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(ptr.Op != OpSB) {
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               idx := x.Args[1]
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
                        break
                }
-               v.reset(Op386MOVBstoreidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               b = x.Block
+               v0 := b.NewValue0(v.Line, Op386MOVWloadidx1, v.Type)
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(idx)
+               v0.AddArg(mem)
                return true
        }
-       // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVWstore [i-1] {s} p w mem)
+       // match: (MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386SHRLconst {
-                       break
-               }
-               if v_1.AuxInt != 8 {
-                       break
-               }
-               w := v_1.Args[0]
-               x := v.Args[2]
-               if x.Op != Op386MOVBstore {
-                       break
-               }
-               if x.AuxInt != i-1 {
-                       break
-               }
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               if w != x.Args[1] {
+               x := v.Args[0]
+               if x.Op != Op386MOVWloadidx2 {
                        break
                }
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               idx := x.Args[1]
                mem := x.Args[2]
                if !(x.Uses == 1 && clobber(x)) {
                        break
                }
-               v.reset(Op386MOVWstore)
-               v.AuxInt = i - 1
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(w)
-               v.AddArg(mem)
+               b = x.Block
+               v0 := b.NewValue0(v.Line, Op386MOVWloadidx2, v.Type)
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(idx)
+               v0.AddArg(mem)
                return true
        }
-       // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVWstore [i-1] {s} p w0 mem)
+       // match: (MOVWLZX (ANDLconst [c] x))
+       // cond:
+       // result: (ANDLconst [c & 0xffff] x)
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386SHRLconst {
-                       break
-               }
-               j := v_1.AuxInt
-               w := v_1.Args[0]
-               x := v.Args[2]
-               if x.Op != Op386MOVBstore {
-                       break
-               }
-               if x.AuxInt != i-1 {
-                       break
-               }
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ANDLconst {
                        break
                }
-               w0 := x.Args[1]
-               if w0.Op != Op386SHRLconst {
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(Op386ANDLconst)
+               v.AuxInt = c & 0xffff
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
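
The MOVWLZX load-folding rules above exploit the fact that on 386 a plain MOVWload already zero-extends into a 32-bit register, so an explicit MOVWLZX on top of it is redundant. The x.Uses == 1 && clobber(x) condition lets the rewrite replace the load outright (building the replacement in x's block, the @x.Block result form) rather than duplicate a memory operation. A simplified mock of the single-use requirement, again on a hypothetical value type:

	package main

	import "fmt"

	// val is a hypothetical stand-in for ssa.Value, for illustration only.
	type val struct {
		Op   string
		Uses int
		Args []*val
	}

	// Mirrors the shape of:
	//   (MOVWLZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x)
	//   -> @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	func rewriteZeroExt(v *val) bool {
		x := v.Args[0]
		if v.Op != "MOVWLZX" || x.Op != "MOVWload" {
			return false
		}
		if x.Uses != 1 {
			// Another user still needs x; rewriting here would mean
			// a second load, which these rules never introduce.
			return false
		}
		v.Op = "MOVWload" // the load itself already zero-extends
		v.Args = x.Args
		return true
	}

	func main() {
		x := &val{Op: "MOVWload", Uses: 1, Args: []*val{{Op: "ptr"}, {Op: "mem"}}}
		v := &val{Op: "MOVWLZX", Args: []*val{x}}
		fmt.Println(rewriteZeroExt(v), v.Op) // true MOVWload
	}
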
+func rewriteValue386_Op386MOVWload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVWstore {
                        break
                }
-               if w0.AuxInt != j-8 {
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
                        break
                }
-               if w != w0.Args[0] {
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWload  [off1] {sym} (ADDLconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVWload  [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
                        break
                }
-               v.reset(Op386MOVWstore)
-               v.AuxInt = i - 1
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(w0)
+               v.reset(Op386MOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MOVBstoreconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
-       // cond: ValAndOff(sc).canAdd(off)
-       // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+       // match: (MOVWload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               sc := v.AuxInt
-               s := v.Aux
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v_0.Op != Op386LEAL {
                        break
                }
-               off := v_0.AuxInt
-               ptr := v_0.Args[0]
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
                mem := v.Args[1]
-               if !(ValAndOff(sc).canAdd(off)) {
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386MOVBstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = s
-               v.AddArg(ptr)
+               v.reset(Op386MOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-       // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+       // match: (MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               sc := v.AuxInt
+               off1 := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
+               if v_0.Op != Op386LEAL1 {
                        break
                }
-               off := v_0.AuxInt
+               off2 := v_0.AuxInt
                sym2 := v_0.Aux
                ptr := v_0.Args[0]
+               idx := v_0.Args[1]
                mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386MOVBstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
+               v.reset(Op386MOVWloadidx1)
+               v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
-       // cond: canMergeSym(sym1, sym2)
-       // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.AuxInt
+               off1 := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL1 {
+               if v_0.Op != Op386LEAL2 {
                        break
                }
-               off := v_0.AuxInt
+               off2 := v_0.AuxInt
                sym2 := v_0.Aux
                ptr := v_0.Args[0]
                idx := v_0.Args[1]
                mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386MOVBstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(off)
+               v.reset(Op386MOVWloadidx2)
+               v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreconst [x] {sym} (ADDL ptr idx) mem)
-       // cond:
-       // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
+       // match: (MOVWload [off] {sym} (ADDL ptr idx) mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVWloadidx1 [off] {sym} ptr idx mem)
        for {
-               x := v.AuxInt
+               off := v.AuxInt
                sym := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != Op386ADDL {
@@ -5043,134 +5467,98 @@ func rewriteValue386_Op386MOVBstoreconst(v *Value, config *Config) bool {
                ptr := v_0.Args[0]
                idx := v_0.Args[1]
                mem := v.Args[1]
-               v.reset(Op386MOVBstoreconstidx1)
-               v.AuxInt = x
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386MOVWloadidx1)
+               v.AuxInt = off
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+       return false
+}
+func rewriteValue386_Op386MOVWloadidx1(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem)
+       // cond:
+       // result: (MOVWloadidx2 [c] {sym} ptr idx mem)
        for {
                c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               x := v.Args[1]
-               if x.Op != Op386MOVBstoreconst {
-                       break
-               }
-               a := x.AuxInt
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386SHLLconst {
                        break
                }
-               mem := x.Args[1]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+               if v_1.AuxInt != 1 {
                        break
                }
-               v.reset(Op386MOVWstoreconst)
-               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
-               v.Aux = s
-               v.AddArg(p)
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVWloadidx2)
+               v.AuxInt = c
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MOVBstoreconstidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
+       // match: (MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
        // cond:
-       // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               x := v.AuxInt
+               c := v.AuxInt
                sym := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != Op386ADDLconst {
                        break
                }
-               c := v_0.AuxInt
+               d := v_0.AuxInt
                ptr := v_0.Args[0]
                idx := v.Args[1]
                mem := v.Args[2]
-               v.reset(Op386MOVBstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
+               v.reset(Op386MOVWloadidx1)
+               v.AuxInt = c + d
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
+       // match: (MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
        // cond:
-       // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               x := v.AuxInt
+               c := v.AuxInt
                sym := v.Aux
                ptr := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != Op386ADDLconst {
                        break
                }
-               c := v_1.AuxInt
+               d := v_1.AuxInt
                idx := v_1.Args[0]
                mem := v.Args[2]
-               v.reset(Op386MOVBstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
+               v.reset(Op386MOVWloadidx1)
+               v.AuxInt = c + d
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
-       for {
-               c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               i := v.Args[1]
-               x := v.Args[2]
-               if x.Op != Op386MOVBstoreconstidx1 {
-                       break
-               }
-               a := x.AuxInt
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               if i != x.Args[1] {
-                       break
-               }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
-                       break
-               }
-               v.reset(Op386MOVWstoreconstidx1)
-               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(i)
-               v.AddArg(mem)
-               return true
-       }
        return false
 }
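
The first MOVWloadidx1 rule above absorbs a left-shift-by-one index into the addressing-mode scale, upgrading to MOVWloadidx2 with the displacement untouched, since idx<<1 and 2*idx denote the same address contribution:

	package main

	import "fmt"

	func main() {
		const base, idx, c = 2000, 5, 12
		// (MOVWloadidx1 [c] ptr (SHLLconst [1] idx)): base + (idx<<1) + c
		// (MOVWloadidx2 [c] ptr idx):                 base + 2*idx + c
		fmt.Println(base+(idx<<1)+c == base+2*idx+c) // true
	}
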
-func rewriteValue386_Op386MOVBstoreidx1(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVWloadidx2(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+       // match: (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem)
        // cond:
-       // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+       // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
        for {
                c := v.AuxInt
                sym := v.Aux
@@ -5181,20 +5569,18 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value, config *Config) bool {
                d := v_0.AuxInt
                ptr := v_0.Args[0]
                idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVBstoreidx1)
+               mem := v.Args[2]
+               v.reset(Op386MOVWloadidx2)
                v.AuxInt = c + d
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
-               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+       // match: (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem)
        // cond:
-       // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+       // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
        for {
                c := v.AuxInt
                sym := v.Aux
@@ -5205,84 +5591,238 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value, config *Config) bool {
                }
                d := v_1.AuxInt
                idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVBstoreidx1)
-               v.AuxInt = c + d
+               mem := v.Args[2]
+               v.reset(Op386MOVWloadidx2)
+               v.AuxInt = c + 2*d
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
-               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
+       return false
+}
+func rewriteValue386_Op386MOVWstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem)
+       // cond:
+       // result: (MOVWstore [off] {sym} ptr x mem)
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != Op386SHRLconst {
-                       break
-               }
-               if v_2.AuxInt != 8 {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVWLSX {
                        break
                }
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != Op386MOVBstoreidx1 {
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVWstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem)
+       // cond:
+       // result: (MOVWstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVWLZX {
                        break
                }
-               if x.AuxInt != i-1 {
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(Op386MOVWstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVWstore  [off1+off2] {sym} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDLconst {
                        break
                }
-               if x.Aux != s {
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
                        break
                }
-               if p != x.Args[0] {
+               v.reset(Op386MOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
+       // cond: validOff(off)
+       // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               if idx != x.Args[1] {
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               if !(validOff(off)) {
                        break
                }
-               if w != x.Args[2] {
+               v.reset(Op386MOVWstoreconst)
+               v.AuxInt = makeValAndOff(int64(int16(c)), off)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL {
                        break
                }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386MOVWstoreidx1)
-               v.AuxInt = i - 1
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(idx)
-               v.AddArg(w)
+               v.reset(Op386MOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
+       // match: (MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != Op386SHRLconst {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL1 {
                        break
                }
-               j := v_2.AuxInt
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != Op386MOVBstoreidx1 {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               if x.AuxInt != i-1 {
+               v.reset(Op386MOVWstoreidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386LEAL2 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(Op386MOVWstoreidx2)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off] {sym} (ADDL ptr idx) val mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != Op386ADDL {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(Op386MOVWstoreidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVLstore [i-2] {s} p w mem)
+       for {
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386SHRLconst {
+                       break
+               }
+               if v_1.AuxInt != 16 {
+                       break
+               }
+               w := v_1.Args[0]
+               x := v.Args[2]
+               if x.Op != Op386MOVWstore {
+                       break
+               }
+               if x.AuxInt != i-2 {
                        break
                }
                if x.Aux != s {
@@ -5291,164 +5831,178 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value, config *Config) bool {
                if p != x.Args[0] {
                        break
                }
-               if idx != x.Args[1] {
+               if w != x.Args[1] {
                        break
                }
-               w0 := x.Args[2]
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVLstore)
+               v.AuxInt = i - 2
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(w)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVLstore [i-2] {s} p w0 mem)
+       for {
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386SHRLconst {
+                       break
+               }
+               j := v_1.AuxInt
+               w := v_1.Args[0]
+               x := v.Args[2]
+               if x.Op != Op386MOVWstore {
+                       break
+               }
+               if x.AuxInt != i-2 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               w0 := x.Args[1]
                if w0.Op != Op386SHRLconst {
                        break
                }
-               if w0.AuxInt != j-8 {
+               if w0.AuxInt != j-16 {
                        break
                }
                if w != w0.Args[0] {
                        break
                }
-               mem := x.Args[3]
+               mem := x.Args[2]
                if !(x.Uses == 1 && clobber(x)) {
                        break
                }
-               v.reset(Op386MOVWstoreidx1)
-               v.AuxInt = i - 1
+               v.reset(Op386MOVLstore)
+               v.AuxInt = i - 2
                v.Aux = s
                v.AddArg(p)
-               v.AddArg(idx)
                v.AddArg(w0)
                v.AddArg(mem)
                return true
        }
        return false
 }
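
The last two rules in this function merge a pair of adjacent 16-bit stores of the two halves of one value into a single 32-bit store. A little-endian sanity check of that byte layout, as a standalone sketch (not part of this CL):

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
	)

	func main() {
		w := uint32(0xdeadbeef)

		pair := make([]byte, 4)
		binary.LittleEndian.PutUint16(pair[0:], uint16(w))     // MOVWstore [i-2] {s} p w
		binary.LittleEndian.PutUint16(pair[2:], uint16(w>>16)) // MOVWstore [i] {s} p (SHRLconst [16] w)

		merged := make([]byte, 4)
		binary.LittleEndian.PutUint32(merged, w) // MOVLstore [i-2] {s} p w

		fmt.Println(bytes.Equal(pair, merged)) // true
		// The second rule is the same identity applied to w0 = w>>(j-16):
		// the store at [i] holds w>>j, which is exactly uint16(w0>>16).
	}
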
-func rewriteValue386_Op386MOVLload(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVWstoreconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-       // result: x
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLstore {
-                       break
-               }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (MOVLload  [off1] {sym} (ADDLconst [off2] ptr) mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVLload  [off1+off2] {sym} ptr mem)
+       // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+       // cond: ValAndOff(sc).canAdd(off)
+       // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
+               sc := v.AuxInt
+               s := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != Op386ADDLconst {
                        break
                }
-               off2 := v_0.AuxInt
+               off := v_0.AuxInt
                ptr := v_0.Args[0]
                mem := v.Args[1]
-               if !(is32Bit(off1 + off2)) {
+               if !(ValAndOff(sc).canAdd(off)) {
                        break
                }
-               v.reset(Op386MOVLload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
+               v.reset(Op386MOVWstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
+               v.Aux = s
                v.AddArg(ptr)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
+       // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+       // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
        for {
-               off1 := v.AuxInt
+               sc := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != Op386LEAL {
                        break
                }
-               off2 := v_0.AuxInt
+               off := v_0.AuxInt
                sym2 := v_0.Aux
-               base := v_0.Args[0]
+               ptr := v_0.Args[0]
                mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
                        break
                }
-               v.reset(Op386MOVLload)
-               v.AuxInt = off1 + off2
+               v.reset(Op386MOVWstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
+               v.AddArg(ptr)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
+       // cond: canMergeSym(sym1, sym2)
+       // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               off1 := v.AuxInt
+               x := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != Op386LEAL1 {
                        break
                }
-               off2 := v_0.AuxInt
+               off := v_0.AuxInt
                sym2 := v_0.Aux
                ptr := v_0.Args[0]
                idx := v_0.Args[1]
                mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if !(canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386MOVLloadidx1)
-               v.AuxInt = off1 + off2
+               v.reset(Op386MOVWstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(off)
                v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem)
+       // cond: canMergeSym(sym1, sym2)
+       // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               off1 := v.AuxInt
+               x := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL4 {
+               if v_0.Op != Op386LEAL2 {
                        break
                }
-               off2 := v_0.AuxInt
+               off := v_0.AuxInt
                sym2 := v_0.Aux
                ptr := v_0.Args[0]
                idx := v_0.Args[1]
                mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if !(canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(Op386MOVLloadidx4)
-               v.AuxInt = off1 + off2
+               v.reset(Op386MOVWstoreconstidx2)
+               v.AuxInt = ValAndOff(x).add(off)
                v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLload [off] {sym} (ADDL ptr idx) mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVLloadidx1 [off] {sym} ptr idx mem)
+       // match: (MOVWstoreconst [x] {sym} (ADDL ptr idx) mem)
+       // cond:
+       // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
        for {
-               off := v.AuxInt
+               x := v.AuxInt
                sym := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != Op386ADDL {
@@ -5457,25 +6011,51 @@ func rewriteValue386_Op386MOVLload(v *Value, config *Config) bool {
                ptr := v_0.Args[0]
                idx := v_0.Args[1]
                mem := v.Args[1]
-               if !(ptr.Op != OpSB) {
-                       break
-               }
-               v.reset(Op386MOVLloadidx1)
-               v.AuxInt = off
+               v.reset(Op386MOVWstoreconstidx1)
+               v.AuxInt = x
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
+       // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+       for {
+               c := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               x := v.Args[1]
+               if x.Op != Op386MOVWstoreconst {
+                       break
+               }
+               a := x.AuxInt
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               mem := x.Args[1]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVLstoreconst)
+               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(mem)
+               return true
+       }
        return false
 }
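
These rules lean on the ValAndOff helpers, which pack a constant value and a store offset into the single AuxInt field. A rough standalone model (the real helpers live in cmd/compile/internal/ssa; the 32/32-bit split shown here is an assumption for illustration), plus the merge arithmetic from the last rule:

	package main

	import "fmt"

	type ValAndOff int64

	func makeValAndOff(val, off int64) int64 { return val<<32 | int64(uint32(off)) }
	func (x ValAndOff) Val() int64           { return int64(x) >> 32 }
	func (x ValAndOff) Off() int64           { return int64(int32(x)) }
	func (x ValAndOff) canAdd(off int64) bool {
		newoff := x.Off() + off
		return newoff == int64(int32(newoff)) // offset must still fit in 32 bits
	}
	func (x ValAndOff) add(off int64) int64 { return makeValAndOff(x.Val(), x.Off()+off) }

	func main() {
		a := ValAndOff(makeValAndOff(0x1234, 8))  // low 16-bit half at offset 8
		c := ValAndOff(makeValAndOff(0xabcd, 10)) // high half at a.Off()+2
		merged := a.Val()&0xffff | c.Val()<<16
		fmt.Printf("%#x at offset %d\n", merged, a.Off()) // 0xabcd1234 at offset 8
	}
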
-func rewriteValue386_Op386MOVLloadidx1(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVWstoreconstidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem)
+       // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem)
        // cond:
-       // result: (MOVLloadidx4 [c] {sym} ptr idx mem)
+       // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
        for {
                c := v.AuxInt
                sym := v.Aux
@@ -5484,12 +6064,12 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value, config *Config) bool {
                if v_1.Op != Op386SHLLconst {
                        break
                }
-               if v_1.AuxInt != 2 {
+               if v_1.AuxInt != 1 {
                        break
                }
                idx := v_1.Args[0]
                mem := v.Args[2]
-               v.reset(Op386MOVLloadidx4)
+               v.reset(Op386MOVWstoreconstidx2)
                v.AuxInt = c
                v.Aux = sym
                v.AddArg(ptr)
@@ -5497,1776 +6077,1344 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value, config *Config) bool {
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
+       // match: (MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
        // cond:
-       // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               c := v.AuxInt
+               x := v.AuxInt
                sym := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != Op386ADDLconst {
                        break
                }
-               d := v_0.AuxInt
+               c := v_0.AuxInt
                ptr := v_0.Args[0]
                idx := v.Args[1]
                mem := v.Args[2]
-               v.reset(Op386MOVLloadidx1)
-               v.AuxInt = c + d
+               v.reset(Op386MOVWstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
+       // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
        // cond:
-       // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               c := v.AuxInt
+               x := v.AuxInt
                sym := v.Aux
                ptr := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != Op386ADDLconst {
                        break
                }
-               d := v_1.AuxInt
+               c := v_1.AuxInt
                idx := v_1.Args[0]
                mem := v.Args[2]
-               v.reset(Op386MOVLloadidx1)
-               v.AuxInt = c + d
+               v.reset(Op386MOVWstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
+       // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
+       for {
+               c := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               i := v.Args[1]
+               x := v.Args[2]
+               if x.Op != Op386MOVWstoreconstidx1 {
+                       break
+               }
+               a := x.AuxInt
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if i != x.Args[1] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVLstoreconstidx1)
+               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(i)
+               v.AddArg(mem)
+               return true
+       }
        return false
 }
-func rewriteValue386_Op386MOVLloadidx4(v *Value, config *Config) bool {
+func rewriteValue386_Op386MOVWstoreconstidx2(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
+       // match: (MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem)
        // cond:
-       // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
+       // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               c := v.AuxInt
+               x := v.AuxInt
                sym := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != Op386ADDLconst {
                        break
                }
-               d := v_0.AuxInt
+               c := v_0.AuxInt
                ptr := v_0.Args[0]
                idx := v.Args[1]
                mem := v.Args[2]
-               v.reset(Op386MOVLloadidx4)
-               v.AuxInt = c + d
+               v.reset(Op386MOVWstoreconstidx2)
+               v.AuxInt = ValAndOff(x).add(c)
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
+       // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem)
        // cond:
-       // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
+       // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
        for {
-               c := v.AuxInt
+               x := v.AuxInt
                sym := v.Aux
                ptr := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != Op386ADDLconst {
                        break
                }
-               d := v_1.AuxInt
+               c := v_1.AuxInt
                idx := v_1.Args[0]
                mem := v.Args[2]
-               v.reset(Op386MOVLloadidx4)
-               v.AuxInt = c + 4*d
+               v.reset(Op386MOVWstoreconstidx2)
+               v.AuxInt = ValAndOff(x).add(2 * c)
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MOVLstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVLstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVLstore  [off1+off2] {sym} ptr val mem)
+       // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst <i.Type> [1] i) mem)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               c := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               i := v.Args[1]
+               x := v.Args[2]
+               if x.Op != Op386MOVWstoreconstidx2 {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1 + off2)) {
+               a := x.AuxInt
+               if x.Aux != s {
                        break
                }
-               v.reset(Op386MOVLstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
+               if p != x.Args[0] {
+                       break
+               }
+               if i != x.Args[1] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVLstoreconstidx1)
+               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+               v.Aux = s
+               v.AddArg(p)
+               v0 := b.NewValue0(v.Line, Op386SHLLconst, i.Type)
+               v0.AuxInt = 1
+               v0.AddArg(i)
+               v.AddArg(v0)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
-       // cond: validOff(off)
-       // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
+       return false
+}
+func rewriteValue386_Op386MOVWstoreidx1(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem)
+       // cond:
+       // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
        for {
-               off := v.AuxInt
+               c := v.AuxInt
                sym := v.Aux
                ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
+               if v_1.Op != Op386SHLLconst {
                        break
                }
-               c := v_1.AuxInt
-               mem := v.Args[2]
-               if !(validOff(off)) {
+               if v_1.AuxInt != 1 {
                        break
                }
-               v.reset(Op386MOVLstoreconst)
-               v.AuxInt = makeValAndOff(int64(int32(c)), off)
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVWstoreidx2)
+               v.AuxInt = c
                v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+       // match: (MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+       // cond:
+       // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               c := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v_0.Op != Op386ADDLconst {
                        break
                }
-               v.reset(Op386MOVLstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVWstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       // match: (MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+       // cond:
+       // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL1 {
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ADDLconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(Op386MOVLstoreidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVWstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL4 {
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != Op386SHRLconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v_2.AuxInt != 16 {
                        break
                }
-               v.reset(Op386MOVLstoreidx4)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVLstore [off] {sym} (ADDL ptr idx) val mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDL {
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != Op386MOVWstoreidx1 {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(ptr.Op != OpSB) {
+               if x.AuxInt != i-2 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if idx != x.Args[1] {
+                       break
+               }
+               if w != x.Args[2] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
                        break
                }
                v.reset(Op386MOVLstoreidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               v.AuxInt = i - 2
+               v.Aux = s
+               v.AddArg(p)
                v.AddArg(idx)
-               v.AddArg(val)
+               v.AddArg(w)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MOVLstoreconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
-       // cond: ValAndOff(sc).canAdd(off)
-       // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+       // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
        for {
-               sc := v.AuxInt
+               i := v.AuxInt
                s := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != Op386SHRLconst {
                        break
                }
-               off := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(ValAndOff(sc).canAdd(off)) {
+               j := v_2.AuxInt
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != Op386MOVWstoreidx1 {
                        break
                }
-               v.reset(Op386MOVLstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = s
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-       // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-       for {
-               sc := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
+               if x.AuxInt != i-2 {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+               if x.Aux != s {
                        break
                }
-               v.reset(Op386MOVLstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
-       // cond: canMergeSym(sym1, sym2)
-       // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-       for {
-               x := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL1 {
+               if p != x.Args[0] {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               if idx != x.Args[1] {
                        break
                }
-               v.reset(Op386MOVLstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem)
-       // cond: canMergeSym(sym1, sym2)
-       // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-       for {
-               x := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL4 {
+               w0 := x.Args[2]
+               if w0.Op != Op386SHRLconst {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               if w0.AuxInt != j-16 {
                        break
                }
-               v.reset(Op386MOVLstoreconstidx4)
-               v.AuxInt = ValAndOff(x).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
+               if w != w0.Args[0] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVLstoreidx1)
+               v.AuxInt = i - 2
+               v.Aux = s
+               v.AddArg(p)
                v.AddArg(idx)
+               v.AddArg(w0)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem)
+       return false
+}
+func rewriteValue386_Op386MOVWstoreidx2(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem)
        // cond:
-       // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
+       // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
        for {
-               x := v.AuxInt
+               c := v.AuxInt
                sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDL {
+               if v_0.Op != Op386ADDLconst {
                        break
                }
+               d := v_0.AuxInt
                ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               v.reset(Op386MOVLstoreconstidx1)
-               v.AuxInt = x
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVWstoreidx2)
+               v.AuxInt = c + d
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MOVLstoreconstidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem)
+       // match: (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem)
        // cond:
-       // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
+       // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
        for {
                c := v.AuxInt
                sym := v.Aux
                ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386SHLLconst {
-                       break
-               }
-               if v_1.AuxInt != 2 {
+               if v_1.Op != Op386ADDLconst {
                        break
                }
+               d := v_1.AuxInt
                idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVLstoreconstidx4)
-               v.AuxInt = c
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(Op386MOVWstoreidx2)
+               v.AuxInt = c + 2*d
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
-       // cond:
-       // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-       for {
-               x := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+       // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w mem)
+       for {
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != Op386SHRLconst {
                        break
                }
-               c := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(Op386MOVLstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
+               if v_2.AuxInt != 16 {
+                       break
+               }
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != Op386MOVWstoreidx2 {
+                       break
+               }
+               if x.AuxInt != i-2 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if idx != x.Args[1] {
+                       break
+               }
+               if w != x.Args[2] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVLstoreidx1)
+               v.AuxInt = i - 2
+               v.Aux = s
+               v.AddArg(p)
+               v0 := b.NewValue0(v.Line, Op386SHLLconst, idx.Type)
+               v0.AuxInt = 1
+               v0.AddArg(idx)
+               v.AddArg(v0)
+               v.AddArg(w)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
-       // cond:
-       // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w0 mem)
        for {
-               x := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != Op386SHRLconst {
                        break
                }
-               c := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVLstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
+               j := v_2.AuxInt
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != Op386MOVWstoreidx2 {
+                       break
+               }
+               if x.AuxInt != i-2 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if idx != x.Args[1] {
+                       break
+               }
+               w0 := x.Args[2]
+               if w0.Op != Op386SHRLconst {
+                       break
+               }
+               if w0.AuxInt != j-16 {
+                       break
+               }
+               if w != w0.Args[0] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(Op386MOVLstoreidx1)
+               v.AuxInt = i - 2
+               v.Aux = s
+               v.AddArg(p)
+               v0 := b.NewValue0(v.Line, Op386SHLLconst, idx.Type)
+               v0.AuxInt = 1
+               v0.AddArg(idx)
+               v.AddArg(v0)
+               v.AddArg(w0)
                v.AddArg(mem)
                return true
        }
        return false
 }
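
When the scale-2 rules above merge into a 32-bit store, they switch to the scale-1 form and double the index with an explicit SHLLconst [1], re-expressing ptr + 2*idx as ptr + (idx<<1). A one-line check of that identity, as a sketch:

	package main

	import "fmt"

	func main() {
		ptr, idx, off := int32(0x2000), int32(7), int32(-2)
		fmt.Println(ptr+2*idx+off == ptr+(idx<<1)+off) // true
	}
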
-func rewriteValue386_Op386MOVLstoreconstidx4(v *Value, config *Config) bool {
+func rewriteValue386_Op386MULL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem)
+       // match: (MULL x (MOVLconst [c]))
        // cond:
-       // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // result: (MULLconst [c] x)
        for {
-               x := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               c := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(Op386MOVLstoreconstidx4)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(Op386MULLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem)
+       // match: (MULL (MOVLconst [c]) x)
        // cond:
-       // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
+       // result: (MULLconst [c] x)
        for {
-               x := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386MOVLconst {
                        break
                }
-               c := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVLstoreconstidx4)
-               v.AuxInt = ValAndOff(x).add(4 * c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(Op386MULLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValue386_Op386MOVLstoreidx1(v *Value, config *Config) bool {
+func rewriteValue386_Op386MULLconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem)
+       // match: (MULLconst [c] (MULLconst [d] x))
        // cond:
-       // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
+       // result: (MULLconst [int64(int32(c * d))] x)
        for {
                c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386SHLLconst {
-                       break
-               }
-               if v_1.AuxInt != 2 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386MULLconst {
                        break
                }
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVLstoreidx4)
-               v.AuxInt = c
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(Op386MULLconst)
+               v.AuxInt = int64(int32(c * d))
+               v.AddArg(x)
                return true
        }
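
Note that the fold keeps only the low 32 bits of c*d, which matches what the two chained 32-bit multiplies would have produced. A quick check of the wraparound:

	package main

	import "fmt"

	func main() {
		c, d := int64(100000), int64(100000)
		fmt.Println(c * d)               // 10000000000: too big for int32
		fmt.Println(int64(int32(c * d))) // 1410065408: the wrapped 32-bit product
		fmt.Println(int32(c) * int32(d)) // 1410065408: same as multiplying in two steps
	}
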
-       // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+       // match: (MULLconst [-1] x)
        // cond:
-       // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+       // result: (NEGL x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v.AuxInt != -1 {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVLstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(Op386NEGL)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+       // match: (MULLconst [0] _)
        // cond:
-       // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+       // result: (MOVLconst [0])
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if v.AuxInt != 0 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVLstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MOVLstoreidx4(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+       // match: (MULLconst [1] x)
        // cond:
-       // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
+       // result: x
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v.AuxInt != 1 {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVLstoreidx4)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+       // match: (MULLconst [3] x)
        // cond:
-       // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
+       // result: (LEAL2 x x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if v.AuxInt != 3 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVLstoreidx4)
-               v.AuxInt = c + 4*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(Op386LEAL2)
+               v.AddArg(x)
+               v.AddArg(x)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MOVSDload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVSDload [off1+off2] {sym} ptr mem)
+       // match: (MULLconst [5] x)
+       // cond:
+       // result: (LEAL4 x x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1 + off2)) {
+               if v.AuxInt != 5 {
                        break
                }
-               v.reset(Op386MOVSDload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(Op386LEAL4)
+               v.AddArg(x)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+       // match: (MULLconst [7] x)
+       // cond:
+       // result: (LEAL8 (NEGL <v.Type> x) x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v.AuxInt != 7 {
                        break
                }
-               v.reset(Op386MOVSDload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(Op386LEAL8)
+               v0 := b.NewValue0(v.Line, Op386NEGL, v.Type)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (MULLconst [9] x)
+       // cond:
+       // result: (LEAL8 x x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL1 {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v.AuxInt != 9 {
                        break
                }
-               v.reset(Op386MOVSDloadidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(Op386LEAL8)
+               v.AddArg(x)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (MULLconst [11] x)
+       // cond:
+       // result: (LEAL2 x (LEAL4 <v.Type> x x))
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL8 {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v.AuxInt != 11 {
                        break
                }
-               v.reset(Op386MOVSDloadidx8)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(Op386LEAL2)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVSDload [off] {sym} (ADDL ptr idx) mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
+       // match: (MULLconst [13] x)
+       // cond:
+       // result: (LEAL4 x (LEAL2 <v.Type> x x))
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDL {
+               if v.AuxInt != 13 {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(ptr.Op != OpSB) {
+               x := v.Args[0]
+               v.reset(Op386LEAL4)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386LEAL2, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MULLconst [21] x)
+       // cond:
+       // result: (LEAL4 x (LEAL4 <v.Type> x x))
+       for {
+               if v.AuxInt != 21 {
                        break
                }
-               v.reset(Op386MOVSDloadidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(Op386LEAL4)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MOVSDloadidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
+       // match: (MULLconst [25] x)
        // cond:
-       // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (LEAL8 x (LEAL2 <v.Type> x x))
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v.AuxInt != 25 {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(Op386MOVSDloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(Op386LEAL8)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386LEAL2, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
+       // match: (MULLconst [37] x)
        // cond:
-       // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (LEAL4 x (LEAL8 <v.Type> x x))
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if v.AuxInt != 37 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVSDloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(Op386LEAL4)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386LEAL8, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MOVSDloadidx8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem)
+       // match: (MULLconst [41] x)
        // cond:
-       // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
+       // result: (LEAL8 x (LEAL4 <v.Type> x x))
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v.AuxInt != 41 {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(Op386MOVSDloadidx8)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(Op386LEAL8)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem)
+       // match: (MULLconst [73] x)
        // cond:
-       // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
+       // result: (LEAL8 x (LEAL8 <v.Type> x x))
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if v.AuxInt != 73 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVSDloadidx8)
-               v.AuxInt = c + 8*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(Op386LEAL8)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386LEAL8, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MOVSDstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
+       // match: (MULLconst [c] x)
+       // cond: isPowerOfTwo(c)
+       // result: (SHLLconst [log2(c)] x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1 + off2)) {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(isPowerOfTwo(c)) {
                        break
                }
-               v.reset(Op386MOVSDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(Op386SHLLconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
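+       // When c sits next to a power of two, one shift plus one SUBL or LEAL
+       // suffices: c*x = (c+1)*x - x, or c*x = (c-k)*x + k*x for k = 1, 2, 4, 8.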
+       // match: (MULLconst [c] x)
+       // cond: isPowerOfTwo(c+1) && c >= 15
+       // result: (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(isPowerOfTwo(c+1) && c >= 15) {
                        break
                }
-               v.reset(Op386MOVSDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(Op386SUBL)
+               v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
+               v0.AuxInt = log2(c + 1)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       // match: (MULLconst [c] x)
+       // cond: isPowerOfTwo(c-1) && c >= 17
+       // result: (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL1 {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(isPowerOfTwo(c-1) && c >= 17) {
                        break
                }
-               v.reset(Op386MOVSDstoreidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(Op386LEAL1)
+               v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
+               v0.AuxInt = log2(c - 1)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       // match: (MULLconst [c] x)
+       // cond: isPowerOfTwo(c-2) && c >= 34
+       // result: (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL8 {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(isPowerOfTwo(c-2) && c >= 34) {
                        break
                }
-               v.reset(Op386MOVSDstoreidx8)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(Op386LEAL2)
+               v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
+               v0.AuxInt = log2(c - 2)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVSDstore [off] {sym} (ADDL ptr idx) val mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
+       // match: (MULLconst [c] x)
+       // cond: isPowerOfTwo(c-4) && c >= 68
+       // result: (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDL {
-                       break
-               }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(ptr.Op != OpSB) {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(isPowerOfTwo(c-4) && c >= 68) {
                        break
                }
-               v.reset(Op386MOVSDstoreidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(Op386LEAL4)
+               v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
+               v0.AuxInt = log2(c - 4)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(x)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MOVSDstoreidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-       // cond:
-       // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
+       // match: (MULLconst [c] x)
+       // cond: isPowerOfTwo(c-8) && c >= 136
+       // result: (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x)
        for {
                c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               x := v.Args[0]
+               if !(isPowerOfTwo(c-8) && c >= 136) {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVSDstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(Op386LEAL8)
+               v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
+               v0.AuxInt = log2(c - 8)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
-       // cond:
-       // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
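+       // When c is 3, 5, or 9 times a power of two, form 3*x, 5*x, or 9*x
+       // with a single LEAL and shift by the remaining power of two.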
+       // match: (MULLconst [c] x)
+       // cond: c%3 == 0 && isPowerOfTwo(c/3)
+       // result: (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))
        for {
                c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               x := v.Args[0]
+               if !(c%3 == 0 && isPowerOfTwo(c/3)) {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVSDstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(Op386SHLLconst)
+               v.AuxInt = log2(c / 3)
+               v0 := b.NewValue0(v.Line, Op386LEAL2, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MOVSDstoreidx8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-       // cond:
-       // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
+       // match: (MULLconst [c] x)
+       // cond: c%5 == 0 && isPowerOfTwo(c/5)
+       // result: (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))
        for {
                c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               x := v.Args[0]
+               if !(c%5 == 0 && isPowerOfTwo(c/5)) {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVSDstoreidx8)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(Op386SHLLconst)
+               v.AuxInt = log2(c / 5)
+               v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+       // match: (MULLconst [c] x)
+       // cond: c%9 == 0 && isPowerOfTwo(c/9)
+       // result: (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))
+       for {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+                       break
+               }
+               v.reset(Op386SHLLconst)
+               v.AuxInt = log2(c / 9)
+               v0 := b.NewValue0(v.Line, Op386LEAL8, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
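+       // Multiplying two constants folds to a constant, truncated to 32 bits.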
+       // match: (MULLconst [c] (MOVLconst [d]))
        // cond:
-       // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
+       // result: (MOVLconst [int64(int32(c*d))])
        for {
                c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386MOVLconst {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVSDstoreidx8)
-               v.AuxInt = c + 8*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(Op386MOVLconst)
+               v.AuxInt = int64(int32(c * d))
                return true
        }
        return false
 }
-func rewriteValue386_Op386MOVSSload(v *Value, config *Config) bool {
+func rewriteValue386_Op386NEGL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVSSload [off1+off2] {sym} ptr mem)
+       // match: (NEGL (MOVLconst [c]))
+       // cond:
+       // result: (MOVLconst [int64(int32(-c))])
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1 + off2)) {
+               if v_0.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386MOVSSload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               v.reset(Op386MOVLconst)
+               v.AuxInt = int64(int32(-c))
                return true
        }
-       // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+       return false
+}
+func rewriteValue386_Op386NOTL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NOTL (MOVLconst [c]))
+       // cond:
+       // result: (MOVLconst [^c])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v_0.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386MOVSSload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               v.reset(Op386MOVLconst)
+               v.AuxInt = ^c
                return true
        }
-       // match: (MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       return false
+}
+func rewriteValue386_Op386ORL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
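+       // An ORL with a constant operand on either side canonicalizes to ORLconst.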
+       // match: (ORL x (MOVLconst [c]))
+       // cond:
+       // result: (ORLconst [c] x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL1 {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386MOVSSloadidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(Op386ORLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (ORL (MOVLconst [c]) x)
+       // cond:
+       // result: (ORLconst [c] x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL4 {
+               if v_0.Op != Op386MOVLconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(Op386ORLconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (ORL x x)
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               if x != v.Args[1] {
                        break
                }
-               v.reset(Op386MOVSSloadidx4)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (MOVSSload [off] {sym} (ADDL ptr idx) mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
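+       // Merge adjacent byte loads combined via ORL and SHLLconst into a single
+       // wider load; the byte order works out because 386 is little-endian.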
+       // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
+       // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
+       // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDL {
-                       break
-               }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(ptr.Op != OpSB) {
+               x0 := v.Args[0]
+               if x0.Op != Op386MOVBload {
                        break
                }
-               v.reset(Op386MOVSSloadidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386MOVSSloadidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
-       // cond:
-       // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               i := x0.AuxInt
+               s := x0.Aux
+               p := x0.Args[0]
+               mem := x0.Args[1]
+               s0 := v.Args[1]
+               if s0.Op != Op386SHLLconst {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(Op386MOVSSloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
-       // cond:
-       // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if s0.AuxInt != 8 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVSSloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386MOVSSloadidx4(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
-       // cond:
-       // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               x1 := s0.Args[0]
+               if x1.Op != Op386MOVBload {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(Op386MOVSSloadidx4)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
-       // cond:
-       // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if x1.AuxInt != i+1 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVSSloadidx4)
-               v.AuxInt = c + 4*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386MOVSSstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if x1.Aux != s {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1 + off2)) {
+               if p != x1.Args[0] {
                        break
                }
-               v.reset(Op386MOVSSstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
+               if mem != x1.Args[1] {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
                        break
                }
-               v.reset(Op386MOVSSstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(val)
-               v.AddArg(mem)
+               b = mergePoint(b, x0, x1)
+               v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = i
+               v0.Aux = s
+               v0.AddArg(p)
+               v0.AddArg(mem)
                return true
        }
-       // match: (MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
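+       // The four-byte version of the merge above: the whole ORL tree
+       // collapses into one 32-bit load.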
+       // match: (ORL o0:(ORL o1:(ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem)))
+       // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)
+       // result: @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL1 {
+               o0 := v.Args[0]
+               if o0.Op != Op386ORL {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               o1 := o0.Args[0]
+               if o1.Op != Op386ORL {
                        break
                }
-               v.reset(Op386MOVSSstoreidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL4 {
+               x0 := o1.Args[0]
+               if x0.Op != Op386MOVBload {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               i := x0.AuxInt
+               s := x0.Aux
+               p := x0.Args[0]
+               mem := x0.Args[1]
+               s0 := o1.Args[1]
+               if s0.Op != Op386SHLLconst {
                        break
                }
-               v.reset(Op386MOVSSstoreidx4)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSstore [off] {sym} (ADDL ptr idx) val mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDL {
+               if s0.AuxInt != 8 {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(ptr.Op != OpSB) {
+               x1 := s0.Args[0]
+               if x1.Op != Op386MOVBload {
                        break
                }
-               v.reset(Op386MOVSSstoreidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386MOVSSstoreidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-       // cond:
-       // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if x1.AuxInt != i+1 {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVSSstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
-       // cond:
-       // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if x1.Aux != s {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVSSstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386MOVSSstoreidx4(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-       // cond:
-       // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if p != x1.Args[0] {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVSSstoreidx4)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
-       // cond:
-       // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if mem != x1.Args[1] {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVSSstoreidx4)
-               v.AuxInt = c + 4*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386MOVWLSX(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
-       for {
-               x := v.Args[0]
-               if x.Op != Op386MOVWload {
+               s1 := o0.Args[1]
+               if s1.Op != Op386SHLLconst {
                        break
                }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               mem := x.Args[1]
-               if !(x.Uses == 1 && clobber(x)) {
+               if s1.AuxInt != 16 {
                        break
                }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, Op386MOVWLSXload, v.Type)
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
-               v0.AddArg(mem)
-               return true
-       }
-       // match: (MOVWLSX (ANDLconst [c] x))
-       // cond: c & 0x8000 == 0
-       // result: (ANDLconst [c & 0x7fff] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ANDLconst {
+               x2 := s1.Args[0]
+               if x2.Op != Op386MOVBload {
                        break
                }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               if !(c&0x8000 == 0) {
+               if x2.AuxInt != i+2 {
                        break
                }
-               v.reset(Op386ANDLconst)
-               v.AuxInt = c & 0x7fff
-               v.AddArg(x)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386MOVWLSXload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
+               if x2.Aux != s {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if p != x2.Args[0] {
                        break
                }
-               v.reset(Op386MOVWLSXload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386MOVWLZX(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
-       for {
-               x := v.Args[0]
-               if x.Op != Op386MOVWload {
+               if mem != x2.Args[1] {
                        break
                }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               mem := x.Args[1]
-               if !(x.Uses == 1 && clobber(x)) {
+               s2 := v.Args[1]
+               if s2.Op != Op386SHLLconst {
                        break
                }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, Op386MOVWload, v.Type)
+               if s2.AuxInt != 24 {
+                       break
+               }
+               x3 := s2.Args[0]
+               if x3.Op != Op386MOVBload {
+                       break
+               }
+               if x3.AuxInt != i+3 {
+                       break
+               }
+               if x3.Aux != s {
+                       break
+               }
+               if p != x3.Args[0] {
+                       break
+               }
+               if mem != x3.Args[1] {
+                       break
+               }
+               if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
+                       break
+               }
+               b = mergePoint(b, x0, x1, x2, x3)
+               v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
                v.reset(OpCopy)
                v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
+               v0.AuxInt = i
+               v0.Aux = s
+               v0.AddArg(p)
                v0.AddArg(mem)
                return true
        }
-       // match: (MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
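+       // Indexed (MOVBloadidx1) variants of the byte-load merges above.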
+       // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
+       // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)
+       // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
        for {
-               x := v.Args[0]
-               if x.Op != Op386MOVWloadidx1 {
+               x0 := v.Args[0]
+               if x0.Op != Op386MOVBloadidx1 {
                        break
                }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               idx := x.Args[1]
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
+               i := x0.AuxInt
+               s := x0.Aux
+               p := x0.Args[0]
+               idx := x0.Args[1]
+               mem := x0.Args[2]
+               s0 := v.Args[1]
+               if s0.Op != Op386SHLLconst {
                        break
                }
-               b = x.Block
+               if s0.AuxInt != 8 {
+                       break
+               }
+               x1 := s0.Args[0]
+               if x1.Op != Op386MOVBloadidx1 {
+                       break
+               }
+               if x1.AuxInt != i+1 {
+                       break
+               }
+               if x1.Aux != s {
+                       break
+               }
+               if p != x1.Args[0] {
+                       break
+               }
+               if idx != x1.Args[1] {
+                       break
+               }
+               if mem != x1.Args[2] {
+                       break
+               }
+               if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
+                       break
+               }
+               b = mergePoint(b, x0, x1)
                v0 := b.NewValue0(v.Line, Op386MOVWloadidx1, v.Type)
                v.reset(OpCopy)
                v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
+               v0.AuxInt = i
+               v0.Aux = s
+               v0.AddArg(p)
                v0.AddArg(idx)
                v0.AddArg(mem)
                return true
        }
-       // match: (MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
+       // match: (ORL o0:(ORL o1:(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
+       // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)
+       // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
        for {
-               x := v.Args[0]
-               if x.Op != Op386MOVWloadidx2 {
+               o0 := v.Args[0]
+               if o0.Op != Op386ORL {
                        break
                }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               idx := x.Args[1]
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
+               o1 := o0.Args[0]
+               if o1.Op != Op386ORL {
                        break
                }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, Op386MOVWloadidx2, v.Type)
+               x0 := o1.Args[0]
+               if x0.Op != Op386MOVBloadidx1 {
+                       break
+               }
+               i := x0.AuxInt
+               s := x0.Aux
+               p := x0.Args[0]
+               idx := x0.Args[1]
+               mem := x0.Args[2]
+               s0 := o1.Args[1]
+               if s0.Op != Op386SHLLconst {
+                       break
+               }
+               if s0.AuxInt != 8 {
+                       break
+               }
+               x1 := s0.Args[0]
+               if x1.Op != Op386MOVBloadidx1 {
+                       break
+               }
+               if x1.AuxInt != i+1 {
+                       break
+               }
+               if x1.Aux != s {
+                       break
+               }
+               if p != x1.Args[0] {
+                       break
+               }
+               if idx != x1.Args[1] {
+                       break
+               }
+               if mem != x1.Args[2] {
+                       break
+               }
+               s1 := o0.Args[1]
+               if s1.Op != Op386SHLLconst {
+                       break
+               }
+               if s1.AuxInt != 16 {
+                       break
+               }
+               x2 := s1.Args[0]
+               if x2.Op != Op386MOVBloadidx1 {
+                       break
+               }
+               if x2.AuxInt != i+2 {
+                       break
+               }
+               if x2.Aux != s {
+                       break
+               }
+               if p != x2.Args[0] {
+                       break
+               }
+               if idx != x2.Args[1] {
+                       break
+               }
+               if mem != x2.Args[2] {
+                       break
+               }
+               s2 := v.Args[1]
+               if s2.Op != Op386SHLLconst {
+                       break
+               }
+               if s2.AuxInt != 24 {
+                       break
+               }
+               x3 := s2.Args[0]
+               if x3.Op != Op386MOVBloadidx1 {
+                       break
+               }
+               if x3.AuxInt != i+3 {
+                       break
+               }
+               if x3.Aux != s {
+                       break
+               }
+               if p != x3.Args[0] {
+                       break
+               }
+               if idx != x3.Args[1] {
+                       break
+               }
+               if mem != x3.Args[2] {
+                       break
+               }
+               if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
+                       break
+               }
+               b = mergePoint(b, x0, x1, x2, x3)
+               v0 := b.NewValue0(v.Line, Op386MOVLloadidx1, v.Type)
                v.reset(OpCopy)
                v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
+               v0.AuxInt = i
+               v0.Aux = s
+               v0.AddArg(p)
                v0.AddArg(idx)
                v0.AddArg(mem)
                return true
        }
-       // match: (MOVWLZX (ANDLconst [c] x))
-       // cond:
-       // result: (ANDLconst [c & 0xffff] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ANDLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(Op386ANDLconst)
-               v.AuxInt = c & 0xffff
-               v.AddArg(x)
-               return true
-       }
        return false
 }
-func rewriteValue386_Op386MOVWload(v *Value, config *Config) bool {
+func rewriteValue386_Op386ORLconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
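+       // ORLconst simplifications: OR with 0 is the identity, OR with -1
+       // saturates to -1, and two constants fold.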
+       // match: (ORLconst [c] x)
+       // cond: int32(c)==0
        // result: x
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVWstore {
-                       break
-               }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(int32(c) == 0) {
                        break
                }
                v.reset(OpCopy)
@@ -7274,3498 +7422,3193 @@ func rewriteValue386_Op386MOVWload(v *Value, config *Config) bool {
                v.AddArg(x)
                return true
        }
-       // match: (MOVWload  [off1] {sym} (ADDLconst [off2] ptr) mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVWload  [off1+off2] {sym} ptr mem)
+       // match: (ORLconst [c] _)
+       // cond: int32(c)==-1
+       // result: (MOVLconst [-1])
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1 + off2)) {
+               c := v.AuxInt
+               if !(int32(c) == -1) {
                        break
                }
-               v.reset(Op386MOVWload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = -1
                return true
        }
-       // match: (MOVWload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
+       // match: (ORLconst [c] (MOVLconst [d]))
+       // cond:
+       // result: (MOVLconst [c|d])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v_0.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386MOVWload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(Op386MOVLconst)
+               v.AuxInt = c | d
                return true
        }
-       // match: (MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       return false
+}
+func rewriteValue386_Op386ROLBconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
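+       // Composing two rotates adds the rotation amounts modulo the operand
+       // width: 8 for ROLB, 16 for ROLW, 32 for ROLL.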
+       // match: (ROLBconst [c] (ROLBconst [d] x))
+       // cond:
+       // result: (ROLBconst [(c+d)&7] x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL1 {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(Op386MOVWloadidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL2 {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v_0.Op != Op386ROLBconst {
                        break
                }
-               v.reset(Op386MOVWloadidx2)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(Op386ROLBconst)
+               v.AuxInt = (c + d) & 7
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWload [off] {sym} (ADDL ptr idx) mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVWloadidx1 [off] {sym} ptr idx mem)
+       // match: (ROLBconst [0] x)
+       // cond:
+       // result: x
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ADDL {
-                       break
-               }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(ptr.Op != OpSB) {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(Op386MOVWloadidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValue386_Op386MOVWloadidx1(v *Value, config *Config) bool {
+func rewriteValue386_Op386ROLLconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem)
-       // cond:
-       // result: (MOVWloadidx2 [c] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386SHLLconst {
-                       break
-               }
-               if v_1.AuxInt != 1 {
-                       break
-               }
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVWloadidx2)
-               v.AuxInt = c
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
+       // match: (ROLLconst [c] (ROLLconst [d] x))
        // cond:
-       // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (ROLLconst [(c+d)&31] x)
        for {
                c := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v_0.Op != Op386ROLLconst {
                        break
                }
                d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(Op386MOVWloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v_0.Args[0]
+               v.reset(Op386ROLLconst)
+               v.AuxInt = (c + d) & 31
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
+       // match: (ROLLconst [0] x)
        // cond:
-       // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+       // result: x
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if v.AuxInt != 0 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVWloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValue386_Op386MOVWloadidx2(v *Value, config *Config) bool {
+func rewriteValue386_Op386ROLWconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem)
+       // match: (ROLWconst [c] (ROLWconst [d] x))
        // cond:
-       // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
+       // result: (ROLWconst [(c+d)&15] x)
        for {
                c := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v_0.Op != Op386ROLWconst {
                        break
                }
                d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(Op386MOVWloadidx2)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v_0.Args[0]
+               v.reset(Op386ROLWconst)
+               v.AuxInt = (c + d) & 15
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem)
+       // match: (ROLWconst [0] x)
        // cond:
-       // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
+       // result: x
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               if v.AuxInt != 0 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVWloadidx2)
-               v.AuxInt = c + 2*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
        return false
 }
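+// Rotations compose additively modulo the operand width: rotating by c
+// and then by d is a single rotate by (c+d) mod width. Hence the
+// ROLBconst, ROLLconst, and ROLWconst rules above mask the combined
+// count with &7, &31, and &15 respectively, and a rotate by 0 reduces
+// to a plain copy of its argument.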
-func rewriteValue386_Op386MOVWstore(v *Value, config *Config) bool {
+func rewriteValue386_Op386SARB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem)
+       // match: (SARB x (MOVLconst [c]))
        // cond:
-       // result: (MOVWstore [off] {sym} ptr x mem)
+       // result: (SARBconst [c&31] x)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386MOVWLSX {
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVWstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               c := v_1.AuxInt
+               v.reset(Op386SARBconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
-               v.AddArg(mem)
                return true
        }
-       // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem)
+       // match: (SARB x (MOVLconst [c]))
        // cond:
-       // result: (MOVWstore [off] {sym} ptr x mem)
+       // result: (SARBconst [c&31] x)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386MOVWLZX {
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVWstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               c := v_1.AuxInt
+               v.reset(Op386SARBconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
-               v.AddArg(mem)
                return true
        }
-       // match: (MOVWstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVWstore  [off1+off2] {sym} ptr val mem)
+       return false
+}
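+// The second of the two identical SARB matches above is unreachable:
+// the first already rewrites every value the second would match. The
+// same duplication appears in SARL and SARW below.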
+func rewriteValue386_Op386SARBconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SARBconst [c] (MOVLconst [d]))
+       // cond:
+       // result: (MOVLconst [d>>uint64(c)])
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1 + off2)) {
+               if v_0.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386MOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(Op386MOVLconst)
+               v.AuxInt = d >> uint64(c)
                return true
        }
-       // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
-       // cond: validOff(off)
-       // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
+       return false
+}
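+// Folding a shift of a constant works directly on the AuxInt: AuxInt
+// is a signed int64, so Go's d>>uint64(c) is an arithmetic
+// (sign-extending) shift, matching what SAR computes. The SARLconst
+// and SARWconst rules below fold their constants the same way.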
+func rewriteValue386_Op386SARL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SARL x (MOVLconst [c]))
+       // cond:
+       // result: (SARLconst [c&31] x)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != Op386MOVLconst {
                        break
                }
                c := v_1.AuxInt
-               mem := v.Args[2]
-               if !(validOff(off)) {
+               v.reset(Op386SARLconst)
+               v.AuxInt = c & 31
+               v.AddArg(x)
+               return true
+       }
+       // match: (SARL x (MOVLconst [c]))
+       // cond:
+       // result: (SARLconst [c&31] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386MOVWstoreconst)
-               v.AuxInt = makeValAndOff(int64(int16(c)), off)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(Op386SARLconst)
+               v.AuxInt = c & 31
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+       // match: (SARL x (ANDLconst [31] y))
+       // cond:
+       // result: (SARL x y)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ANDLconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v_1.AuxInt != 31 {
                        break
                }
-               v.reset(Op386MOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(val)
-               v.AddArg(mem)
+               y := v_1.Args[0]
+               v.reset(Op386SARL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       return false
+}
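+// The (SARL x (ANDLconst [31] y)) rule drops a redundant mask: x86
+// shifts of 32-bit operands use only the low five bits of the count
+// register anyway, so ANDing the count with 31 first cannot change
+// the result.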
+func rewriteValue386_Op386SARLconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SARLconst [c] (MOVLconst [d]))
+       // cond:
+       // result: (MOVLconst [d>>uint64(c)])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL1 {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v_0.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386MOVWstoreidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(Op386MOVLconst)
+               v.AuxInt = d >> uint64(c)
                return true
        }
-       // match: (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       return false
+}
+func rewriteValue386_Op386SARW(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SARW x (MOVLconst [c]))
+       // cond:
+       // result: (SARWconst [c&31] x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL2 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               v.reset(Op386SARWconst)
+               v.AuxInt = c & 31
+               v.AddArg(x)
+               return true
+       }
+       // match: (SARW x (MOVLconst [c]))
+       // cond:
+       // result: (SARWconst [c&31] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386MOVWstoreidx2)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(Op386SARWconst)
+               v.AuxInt = c & 31
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstore [off] {sym} (ADDL ptr idx) val mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
+       return false
+}
+func rewriteValue386_Op386SARWconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SARWconst [c] (MOVLconst [d]))
+       // cond:
+       // result: (MOVLconst [d>>uint64(c)])
        for {
-               off := v.AuxInt
-               sym := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDL {
-                       break
-               }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(ptr.Op != OpSB) {
+               if v_0.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386MOVWstoreidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(Op386MOVLconst)
+               v.AuxInt = d >> uint64(c)
                return true
        }
-       // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVLstore [i-2] {s} p w mem)
+       return false
+}
+func rewriteValue386_Op386SBBL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SBBL x (MOVLconst [c]) f)
+       // cond:
+       // result: (SBBLconst [c] x f)
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386SHRLconst {
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               if v_1.AuxInt != 16 {
+               c := v_1.AuxInt
+               f := v.Args[2]
+               v.reset(Op386SBBLconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(f)
+               return true
+       }
+       return false
+}
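+// SBBL keeps its third argument f when the constant is folded in:
+// subtract-with-borrow still consumes the incoming carry, so only the
+// register operand becomes the SBBLconst immediate.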
+func rewriteValue386_Op386SBBLcarrymask(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SBBLcarrymask (FlagEQ))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagEQ {
                        break
                }
-               w := v_1.Args[0]
-               x := v.Args[2]
-               if x.Op != Op386MOVWstore {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SBBLcarrymask (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [-1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_ULT {
                        break
                }
-               if x.AuxInt != i-2 {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = -1
+               return true
+       }
+       // match: (SBBLcarrymask (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_UGT {
                        break
                }
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               if w != x.Args[1] {
-                       break
-               }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
-                       break
-               }
-               v.reset(Op386MOVLstore)
-               v.AuxInt = i - 2
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(w)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVLstore [i-2] {s} p w0 mem)
+       // match: (SBBLcarrymask (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [-1])
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386SHRLconst {
-                       break
-               }
-               j := v_1.AuxInt
-               w := v_1.Args[0]
-               x := v.Args[2]
-               if x.Op != Op386MOVWstore {
-                       break
-               }
-               if x.AuxInt != i-2 {
-                       break
-               }
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               w0 := x.Args[1]
-               if w0.Op != Op386SHRLconst {
-                       break
-               }
-               if w0.AuxInt != j-16 {
-                       break
-               }
-               if w != w0.Args[0] {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_ULT {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = -1
+               return true
+       }
+       // match: (SBBLcarrymask (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_UGT {
                        break
                }
-               v.reset(Op386MOVLstore)
-               v.AuxInt = i - 2
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(w0)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
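+// SBBLcarrymask is the SBB r, r idiom: it evaluates to -1 when the
+// carry flag is set and to 0 when it is clear. Carry encodes unsigned
+// less-than, so with known flags it folds to -1 exactly for the _ULT
+// variants and to 0 otherwise.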
-func rewriteValue386_Op386MOVWstoreconst(v *Value, config *Config) bool {
+func rewriteValue386_Op386SETA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
-       // cond: ValAndOff(sc).canAdd(off)
-       // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+       // match: (SETA (InvertFlags x))
+       // cond:
+       // result: (SETB x)
        for {
-               sc := v.AuxInt
-               s := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
-                       break
-               }
-               off := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(ValAndOff(sc).canAdd(off)) {
+               if v_0.Op != Op386InvertFlags {
                        break
                }
-               v.reset(Op386MOVWstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = s
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v_0.Args[0]
+               v.reset(Op386SETB)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-       // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+       // match: (SETA (FlagEQ))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               sc := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL {
-                       break
-               }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+               if v_0.Op != Op386FlagEQ {
                        break
                }
-               v.reset(Op386MOVWstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
-       // cond: canMergeSym(sym1, sym2)
-       // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (SETA (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               x := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL1 {
-                       break
-               }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               if v_0.Op != Op386FlagLT_ULT {
                        break
                }
-               v.reset(Op386MOVWstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem)
-       // cond: canMergeSym(sym1, sym2)
-       // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (SETA (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
        for {
-               x := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386LEAL2 {
-                       break
-               }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               if v_0.Op != Op386FlagLT_UGT {
                        break
                }
-               v.reset(Op386MOVWstoreconstidx2)
-               v.AuxInt = ValAndOff(x).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreconst [x] {sym} (ADDL ptr idx) mem)
+       // match: (SETA (FlagGT_ULT))
        // cond:
-       // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
+       // result: (MOVLconst [0])
        for {
-               x := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDL {
+               if v_0.Op != Op386FlagGT_ULT {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               v.reset(Op386MOVWstoreconstidx1)
-               v.AuxInt = x
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+       // match: (SETA (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
        for {
-               c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               x := v.Args[1]
-               if x.Op != Op386MOVWstoreconst {
-                       break
-               }
-               a := x.AuxInt
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               mem := x.Args[1]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_UGT {
                        break
                }
-               v.reset(Op386MOVLstoreconst)
-               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
        return false
 }
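+// With known flags, every SETcc folds to a MOVLconst of 0 or 1; SETA
+// (unsigned >) is 1 only for the _UGT flag values. InvertFlags stands
+// for a comparison with swapped operands, so each SETcc rewrites to
+// its dual condition: SETA <-> SETB, SETAE <-> SETBE, and so on below.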
-func rewriteValue386_Op386MOVWstoreconstidx1(v *Value, config *Config) bool {
+func rewriteValue386_Op386SETAE(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem)
+       // match: (SETAE (InvertFlags x))
        // cond:
-       // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
+       // result: (SETBE x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386SHLLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386InvertFlags {
                        break
                }
-               if v_1.AuxInt != 1 {
+               x := v_0.Args[0]
+               v.reset(Op386SETBE)
+               v.AddArg(x)
+               return true
+       }
+       // match: (SETAE (FlagEQ))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagEQ {
                        break
                }
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVWstoreconstidx2)
-               v.AuxInt = c
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
+       // match: (SETAE (FlagLT_ULT))
        // cond:
-       // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // result: (MOVLconst [0])
        for {
-               x := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v_0.Op != Op386FlagLT_ULT {
                        break
                }
-               c := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(Op386MOVWstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
+       // match: (SETAE (FlagLT_UGT))
        // cond:
-       // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // result: (MOVLconst [1])
        for {
-               x := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_UGT {
                        break
                }
-               c := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVWstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
+       // match: (SETAE (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               i := v.Args[1]
-               x := v.Args[2]
-               if x.Op != Op386MOVWstoreconstidx1 {
-                       break
-               }
-               a := x.AuxInt
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               if i != x.Args[1] {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_ULT {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETAE (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_UGT {
                        break
                }
-               v.reset(Op386MOVLstoreconstidx1)
-               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(i)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
        return false
 }
-func rewriteValue386_Op386MOVWstoreconstidx2(v *Value, config *Config) bool {
+func rewriteValue386_Op386SETB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem)
+       // match: (SETB (InvertFlags x))
        // cond:
-       // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // result: (SETA x)
        for {
-               x := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v_0.Op != Op386InvertFlags {
                        break
                }
-               c := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(Op386MOVWstoreconstidx2)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v_0.Args[0]
+               v.reset(Op386SETA)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem)
+       // match: (SETB (FlagEQ))
        // cond:
-       // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
+       // result: (MOVLconst [0])
        for {
-               x := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagEQ {
                        break
                }
-               c := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(Op386MOVWstoreconstidx2)
-               v.AuxInt = ValAndOff(x).add(2 * c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst <i.Type> [1] i) mem)
+       // match: (SETB (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
        for {
-               c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               i := v.Args[1]
-               x := v.Args[2]
-               if x.Op != Op386MOVWstoreconstidx2 {
-                       break
-               }
-               a := x.AuxInt
-               if x.Aux != s {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_ULT {
                        break
                }
-               if p != x.Args[0] {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETB (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_UGT {
                        break
                }
-               if i != x.Args[1] {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETB (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_ULT {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETB (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_UGT {
                        break
                }
-               v.reset(Op386MOVLstoreconstidx1)
-               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
-               v.Aux = s
-               v.AddArg(p)
-               v0 := b.NewValue0(v.Line, Op386SHLLconst, i.Type)
-               v0.AuxInt = 1
-               v0.AddArg(i)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValue386_Op386MOVWstoreidx1(v *Value, config *Config) bool {
+func rewriteValue386_Op386SETBE(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem)
+       // match: (SETBE (InvertFlags x))
        // cond:
-       // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
+       // result: (SETAE x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386SHLLconst {
-                       break
-               }
-               if v_1.AuxInt != 1 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386InvertFlags {
                        break
                }
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVWstoreidx2)
-               v.AuxInt = c
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v_0.Args[0]
+               v.reset(Op386SETAE)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+       // match: (SETBE (FlagEQ))
        // cond:
-       // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+       // result: (MOVLconst [1])
        for {
-               c := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v_0.Op != Op386FlagEQ {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVWstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+       // match: (SETBE (FlagLT_ULT))
        // cond:
-       // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+       // result: (MOVLconst [1])
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_ULT {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVWstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
+       // match: (SETBE (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != Op386SHRLconst {
-                       break
-               }
-               if v_2.AuxInt != 16 {
-                       break
-               }
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != Op386MOVWstoreidx1 {
-                       break
-               }
-               if x.AuxInt != i-2 {
-                       break
-               }
-               if x.Aux != s {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_UGT {
                        break
                }
-               if p != x.Args[0] {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETBE (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_ULT {
                        break
                }
-               if idx != x.Args[1] {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETBE (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_UGT {
                        break
                }
-               if w != x.Args[2] {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       return false
+}
+func rewriteValue386_Op386SETEQ(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SETEQ (InvertFlags x))
+       // cond:
+       // result: (SETEQ x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386InvertFlags {
                        break
                }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               x := v_0.Args[0]
+               v.reset(Op386SETEQ)
+               v.AddArg(x)
+               return true
+       }
+       // match: (SETEQ (FlagEQ))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagEQ {
                        break
                }
-               v.reset(Op386MOVLstoreidx1)
-               v.AuxInt = i - 2
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(idx)
-               v.AddArg(w)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
+       // match: (SETEQ (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != Op386SHRLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_ULT {
                        break
                }
-               j := v_2.AuxInt
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != Op386MOVWstoreidx1 {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETEQ (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_UGT {
                        break
                }
-               if x.AuxInt != i-2 {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETEQ (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_ULT {
                        break
                }
-               if x.Aux != s {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETEQ (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_UGT {
                        break
                }
-               if p != x.Args[0] {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       return false
+}
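+// Equality is symmetric in its operands, so (SETEQ (InvertFlags x))
+// stays SETEQ rather than flipping to a dual condition.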
+func rewriteValue386_Op386SETG(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SETG (InvertFlags x))
+       // cond:
+       // result: (SETL x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386InvertFlags {
                        break
                }
-               if idx != x.Args[1] {
+               x := v_0.Args[0]
+               v.reset(Op386SETL)
+               v.AddArg(x)
+               return true
+       }
+       // match: (SETG (FlagEQ))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagEQ {
                        break
                }
-               w0 := x.Args[2]
-               if w0.Op != Op386SHRLconst {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETG (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_ULT {
                        break
                }
-               if w0.AuxInt != j-16 {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETG (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_UGT {
                        break
                }
-               if w != w0.Args[0] {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETG (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_ULT {
                        break
                }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETG (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_UGT {
                        break
                }
-               v.reset(Op386MOVLstoreidx1)
-               v.AuxInt = i - 2
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(idx)
-               v.AddArg(w0)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
        return false
 }
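+// The signed predicates key off the LT/GT half of the flag name and
+// ignore the unsigned _ULT/_UGT suffix: SETG folds to 1 for both
+// FlagGT variants, where SETA above distinguishes them.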
-func rewriteValue386_Op386MOVWstoreidx2(v *Value, config *Config) bool {
+func rewriteValue386_Op386SETGE(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem)
+       // match: (SETGE (InvertFlags x))
        // cond:
-       // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
+       // result: (SETLE x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != Op386ADDLconst {
+               if v_0.Op != Op386InvertFlags {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVWstoreidx2)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v_0.Args[0]
+               v.reset(Op386SETLE)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem)
+       // match: (SETGE (FlagEQ))
        // cond:
-       // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
+       // result: (MOVLconst [1])
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ADDLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagEQ {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(Op386MOVWstoreidx2)
-               v.AuxInt = c + 2*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w mem)
+       // match: (SETGE (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != Op386SHRLconst {
-                       break
-               }
-               if v_2.AuxInt != 16 {
-                       break
-               }
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != Op386MOVWstoreidx2 {
-                       break
-               }
-               if x.AuxInt != i-2 {
-                       break
-               }
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               if idx != x.Args[1] {
-                       break
-               }
-               if w != x.Args[2] {
-                       break
-               }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_ULT {
                        break
                }
-               v.reset(Op386MOVLstoreidx1)
-               v.AuxInt = i - 2
-               v.Aux = s
-               v.AddArg(p)
-               v0 := b.NewValue0(v.Line, Op386SHLLconst, idx.Type)
-               v0.AuxInt = 1
-               v0.AddArg(idx)
-               v.AddArg(v0)
-               v.AddArg(w)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w0 mem)
+       // match: (SETGE (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != Op386SHRLconst {
-                       break
-               }
-               j := v_2.AuxInt
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != Op386MOVWstoreidx2 {
-                       break
-               }
-               if x.AuxInt != i-2 {
-                       break
-               }
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               if idx != x.Args[1] {
-                       break
-               }
-               w0 := x.Args[2]
-               if w0.Op != Op386SHRLconst {
-                       break
-               }
-               if w0.AuxInt != j-16 {
-                       break
-               }
-               if w != w0.Args[0] {
-                       break
-               }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_UGT {
                        break
                }
-               v.reset(Op386MOVLstoreidx1)
-               v.AuxInt = i - 2
-               v.Aux = s
-               v.AddArg(p)
-               v0 := b.NewValue0(v.Line, Op386SHLLconst, idx.Type)
-               v0.AuxInt = 1
-               v0.AddArg(idx)
-               v.AddArg(v0)
-               v.AddArg(w0)
-               v.AddArg(mem)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       return false
-}
-func rewriteValue386_Op386MULL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MULL x (MOVLconst [c]))
+       // match: (SETGE (FlagGT_ULT))
        // cond:
-       // result: (MULLconst [c] x)
+       // result: (MOVLconst [1])
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_ULT {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(Op386MULLconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MULL (MOVLconst [c]) x)
+       // match: (SETGE (FlagGT_UGT))
        // cond:
-       // result: (MULLconst [c] x)
+       // result: (MOVLconst [1])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
+               if v_0.Op != Op386FlagGT_UGT {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(Op386MULLconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
        return false
 }
-func rewriteValue386_Op386MULLconst(v *Value, config *Config) bool {
+func rewriteValue386_Op386SETL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MULLconst [c] (MULLconst [d] x))
+       // match: (SETL (InvertFlags x))
        // cond:
-       // result: (MULLconst [int64(int32(c * d))] x)
+       // result: (SETG x)
        for {
-               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != Op386MULLconst {
+               if v_0.Op != Op386InvertFlags {
                        break
                }
-               d := v_0.AuxInt
                x := v_0.Args[0]
-               v.reset(Op386MULLconst)
-               v.AuxInt = int64(int32(c * d))
-               v.AddArg(x)
-               return true
-       }
-       // match: (MULLconst [-1] x)
-       // cond:
-       // result: (NEGL x)
-       for {
-               if v.AuxInt != -1 {
-                       break
-               }
-               x := v.Args[0]
-               v.reset(Op386NEGL)
+               v.reset(Op386SETG)
                v.AddArg(x)
                return true
        }
-       // match: (MULLconst [0] _)
+       // match: (SETL (FlagEQ))
        // cond:
        // result: (MOVLconst [0])
        for {
-               if v.AuxInt != 0 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagEQ {
                        break
                }
                v.reset(Op386MOVLconst)
                v.AuxInt = 0
                return true
        }
-       // match: (MULLconst [1] x)
+       // match: (SETL (FlagLT_ULT))
        // cond:
-       // result: x
+       // result: (MOVLconst [1])
        for {
-               if v.AuxInt != 1 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_ULT {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MULLconst [3] x)
+       // match: (SETL (FlagLT_UGT))
        // cond:
-       // result: (LEAL2 x x)
+       // result: (MOVLconst [1])
        for {
-               if v.AuxInt != 3 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_UGT {
                        break
                }
-               x := v.Args[0]
-               v.reset(Op386LEAL2)
-               v.AddArg(x)
-               v.AddArg(x)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MULLconst [5] x)
+       // match: (SETL (FlagGT_ULT))
        // cond:
-       // result: (LEAL4 x x)
+       // result: (MOVLconst [0])
        for {
-               if v.AuxInt != 5 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_ULT {
                        break
                }
-               x := v.Args[0]
-               v.reset(Op386LEAL4)
-               v.AddArg(x)
-               v.AddArg(x)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MULLconst [7] x)
+       // match: (SETL (FlagGT_UGT))
        // cond:
-       // result: (LEAL8 (NEGL <v.Type> x) x)
+       // result: (MOVLconst [0])
        for {
-               if v.AuxInt != 7 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_UGT {
                        break
                }
-               x := v.Args[0]
-               v.reset(Op386LEAL8)
-               v0 := b.NewValue0(v.Line, Op386NEGL, v.Type)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v.AddArg(x)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MULLconst [9] x)
+       return false
+}
+func rewriteValue386_Op386SETLE(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SETLE (InvertFlags x))
        // cond:
-       // result: (LEAL8 x x)
+       // result: (SETGE x)
        for {
-               if v.AuxInt != 9 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386InvertFlags {
                        break
                }
-               x := v.Args[0]
-               v.reset(Op386LEAL8)
-               v.AddArg(x)
+               x := v_0.Args[0]
+               v.reset(Op386SETGE)
                v.AddArg(x)
                return true
        }
-       // match: (MULLconst [11] x)
+       // match: (SETLE (FlagEQ))
        // cond:
-       // result: (LEAL2 x (LEAL4 <v.Type> x x))
+       // result: (MOVLconst [1])
        for {
-               if v.AuxInt != 11 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagEQ {
                        break
                }
-               x := v.Args[0]
-               v.reset(Op386LEAL2)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MULLconst [13] x)
+       // match: (SETLE (FlagLT_ULT))
        // cond:
-       // result: (LEAL4 x (LEAL2 <v.Type> x x))
+       // result: (MOVLconst [1])
        for {
-               if v.AuxInt != 13 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_ULT {
                        break
                }
-               x := v.Args[0]
-               v.reset(Op386LEAL4)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386LEAL2, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MULLconst [21] x)
+       // match: (SETLE (FlagLT_UGT))
        // cond:
-       // result: (LEAL4 x (LEAL4 <v.Type> x x))
+       // result: (MOVLconst [1])
        for {
-               if v.AuxInt != 21 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_UGT {
                        break
                }
-               x := v.Args[0]
-               v.reset(Op386LEAL4)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MULLconst [25] x)
+       // match: (SETLE (FlagGT_ULT))
        // cond:
-       // result: (LEAL8 x (LEAL2 <v.Type> x x))
+       // result: (MOVLconst [0])
        for {
-               if v.AuxInt != 25 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_ULT {
                        break
                }
-               x := v.Args[0]
-               v.reset(Op386LEAL8)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386LEAL2, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MULLconst [37] x)
+       // match: (SETLE (FlagGT_UGT))
        // cond:
-       // result: (LEAL4 x (LEAL8 <v.Type> x x))
+       // result: (MOVLconst [0])
        for {
-               if v.AuxInt != 37 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_UGT {
                        break
                }
-               x := v.Args[0]
-               v.reset(Op386LEAL4)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386LEAL8, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MULLconst [41] x)
+       return false
+}
+func rewriteValue386_Op386SETNE(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SETNE (InvertFlags x))
        // cond:
-       // result: (LEAL8 x (LEAL4 <v.Type> x x))
+       // result: (SETNE x)
        for {
-               if v.AuxInt != 41 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386InvertFlags {
                        break
                }
-               x := v.Args[0]
-               v.reset(Op386LEAL8)
+               x := v_0.Args[0]
+               v.reset(Op386SETNE)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
                return true
        }
-       // match: (MULLconst [73] x)
+       // match: (SETNE (FlagEQ))
        // cond:
-       // result: (LEAL8 x (LEAL8 <v.Type> x x))
+       // result: (MOVLconst [0])
        for {
-               if v.AuxInt != 73 {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagEQ {
                        break
                }
-               x := v.Args[0]
-               v.reset(Op386LEAL8)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386LEAL8, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MULLconst [c] x)
-       // cond: isPowerOfTwo(c)
-       // result: (SHLLconst [log2(c)] x)
+       // match: (SETNE (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
        for {
-               c := v.AuxInt
-               x := v.Args[0]
-               if !(isPowerOfTwo(c)) {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_ULT {
                        break
                }
-               v.reset(Op386SHLLconst)
-               v.AuxInt = log2(c)
-               v.AddArg(x)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MULLconst [c] x)
-       // cond: isPowerOfTwo(c+1) && c >= 15
-       // result: (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x)
+       // match: (SETNE (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
        for {
-               c := v.AuxInt
-               x := v.Args[0]
-               if !(isPowerOfTwo(c+1) && c >= 15) {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagLT_UGT {
                        break
                }
-               v.reset(Op386SUBL)
-               v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
-               v0.AuxInt = log2(c + 1)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v.AddArg(x)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MULLconst [c] x)
-       // cond: isPowerOfTwo(c-1) && c >= 17
-       // result: (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x)
+       // match: (SETNE (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
        for {
-               c := v.AuxInt
-               x := v.Args[0]
-               if !(isPowerOfTwo(c-1) && c >= 17) {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_ULT {
                        break
                }
-               v.reset(Op386LEAL1)
-               v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
-               v0.AuxInt = log2(c - 1)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v.AddArg(x)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MULLconst [c] x)
-       // cond: isPowerOfTwo(c-2) && c >= 34
-       // result: (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x)
+       // match: (SETNE (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
        for {
-               c := v.AuxInt
-               x := v.Args[0]
-               if !(isPowerOfTwo(c-2) && c >= 34) {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386FlagGT_UGT {
                        break
                }
-               v.reset(Op386LEAL2)
-               v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
-               v0.AuxInt = log2(c - 2)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v.AddArg(x)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MULLconst [c] x)
-       // cond: isPowerOfTwo(c-4) && c >= 68
-       // result: (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x)
+       return false
+}
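
Aside: the SETcc functions above fold a comparison whose flags input is already a known constant (one of the Flag* ops) straight to MOVLconst [0] or [1], and rewrite SETcc of InvertFlags by swapping the condition (SETL becomes SETG; SETNE is its own swap). A minimal sketch of the same folding, with illustrative names (flagState, setl) that are not compiler identifiers:

    package main

    import "fmt"

    // flagState enumerates the constant flag values the rules test for.
    // The names mirror the Op386Flag* opcodes; the type is illustrative.
    type flagState int

    const (
        flagEQ flagState = iota
        flagLT_ULT
        flagLT_UGT
        flagGT_ULT
        flagGT_UGT
    )

    // setl computes what (SETL flags) folds to for a known flag state:
    // 1 exactly when the signed comparison was "less than".
    func setl(f flagState) int64 {
        switch f {
        case flagLT_ULT, flagLT_UGT:
            return 1
        default: // flagEQ, flagGT_ULT, flagGT_UGT
            return 0
        }
    }

    func main() {
        fmt.Println(setl(flagLT_UGT)) // 1, per (SETL (FlagLT_UGT)) -> (MOVLconst [1])
        fmt.Println(setl(flagGT_ULT)) // 0, per (SETL (FlagGT_ULT)) -> (MOVLconst [0])
    }

SETLE, SETGE and SETNE fold the same way, differing only in which flag states map to 1.
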
+func rewriteValue386_Op386SHLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SHLL x (MOVLconst [c]))
+       // cond:
+       // result: (SHLLconst [c&31] x)
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               if !(isPowerOfTwo(c-4) && c >= 68) {
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386LEAL4)
-               v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
-               v0.AuxInt = log2(c - 4)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               c := v_1.AuxInt
+               v.reset(Op386SHLLconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
                return true
        }
-       // match: (MULLconst [c] x)
-       // cond: isPowerOfTwo(c-8) && c >= 136
-       // result: (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x)
+       // match: (SHLL x (MOVLconst [c]))
+       // cond:
+       // result: (SHLLconst [c&31] x)
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               if !(isPowerOfTwo(c-8) && c >= 136) {
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386LEAL8)
-               v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
-               v0.AuxInt = log2(c - 8)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               c := v_1.AuxInt
+               v.reset(Op386SHLLconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
                return true
        }
-       // match: (MULLconst [c] x)
-       // cond: c%3 == 0 && isPowerOfTwo(c/3)
-       // result: (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))
+       // match: (SHLL x (ANDLconst [31] y))
+       // cond:
+       // result: (SHLL x y)
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ANDLconst {
                        break
                }
-               v.reset(Op386SHLLconst)
-               v.AuxInt = log2(c / 3)
-               v0 := b.NewValue0(v.Line, Op386LEAL2, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               return true
-       }
-       // match: (MULLconst [c] x)
-       // cond: c%5 == 0 && isPowerOfTwo(c/5)
-       // result: (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))
-       for {
-               c := v.AuxInt
-               x := v.Args[0]
-               if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+               if v_1.AuxInt != 31 {
                        break
                }
-               v.reset(Op386SHLLconst)
-               v.AuxInt = log2(c / 5)
-               v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               y := v_1.Args[0]
+               v.reset(Op386SHLL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MULLconst [c] x)
-       // cond: c%9 == 0 && isPowerOfTwo(c/9)
-       // result: (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))
+       return false
+}
+func rewriteValue386_Op386SHRB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SHRB x (MOVLconst [c]))
+       // cond:
+       // result: (SHRBconst [c&31] x)
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386SHLLconst)
-               v.AuxInt = log2(c / 9)
-               v0 := b.NewValue0(v.Line, Op386LEAL8, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               c := v_1.AuxInt
+               v.reset(Op386SHRBconst)
+               v.AuxInt = c & 31
+               v.AddArg(x)
                return true
        }
-       // match: (MULLconst [c] (MOVLconst [d]))
+       // match: (SHRB x (MOVLconst [c]))
        // cond:
-       // result: (MOVLconst [int64(int32(c*d))])
+       // result: (SHRBconst [c&31] x)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               d := v_0.AuxInt
-               v.reset(Op386MOVLconst)
-               v.AuxInt = int64(int32(c * d))
+               c := v_1.AuxInt
+               v.reset(Op386SHRBconst)
+               v.AuxInt = c & 31
+               v.AddArg(x)
                return true
        }
        return false
 }
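
Aside: the MULLconst rules on the removed side of this hunk are a strength-reduction table: small multipliers become LEA forms (LEAL2/LEAL4/LEAL8 compute x+2x, x+4x, x+8x), powers of two become shifts, and near-powers-of-two combine the two. A compressed sketch of a few cases, with an illustrative helper name (mulByConst):

    package main

    import "fmt"

    // mulByConst replays a few of the MULLconst strength-reduction rules.
    // The real rules also handle composites (11, 13, 21, 25, ...) via
    // nested LEA forms; this only shows the basic shapes.
    func mulByConst(x int32, c int64) int32 {
        switch {
        case c == 3:
            return x + 2*x // (MULLconst [3] x) -> (LEAL2 x x)
        case c == 5:
            return x + 4*x // (MULLconst [5] x) -> (LEAL4 x x)
        case c == 9:
            return x + 8*x // (MULLconst [9] x) -> (LEAL8 x x)
        case c > 0 && c&(c-1) == 0: // isPowerOfTwo(c)
            s := uint(0)
            for int64(1)<<s < c {
                s++
            }
            return x << s // (MULLconst [c] x) -> (SHLLconst [log2(c)] x)
        default:
            return x * int32(c)
        }
    }

    func main() {
        fmt.Println(mulByConst(7, 9)) // 63, computed as 7 + 7*8
        fmt.Println(mulByConst(7, 8)) // 56, computed as 7 << 3
    }
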
-func rewriteValue386_OpMod16(v *Value, config *Config) bool {
+func rewriteValue386_Op386SHRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod16  x y)
+       // match: (SHRL x (MOVLconst [c]))
        // cond:
-       // result: (MODW  x y)
+       // result: (SHRLconst [c&31] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386MODW)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(Op386SHRLconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpMod16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod16u x y)
+       // match: (SHRL x (MOVLconst [c]))
        // cond:
-       // result: (MODWU x y)
+       // result: (SHRLconst [c&31] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386MODWU)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(Op386SHRLconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValue386_OpMod32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod32  x y)
+       // match: (SHRL x (ANDLconst [31] y))
        // cond:
-       // result: (MODL  x y)
+       // result: (SHRL x y)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386MODL)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386ANDLconst {
+                       break
+               }
+               if v_1.AuxInt != 31 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(Op386SHRL)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValue386_OpMod32u(v *Value, config *Config) bool {
+func rewriteValue386_Op386SHRW(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod32u x y)
+       // match: (SHRW x (MOVLconst [c]))
        // cond:
-       // result: (MODLU x y)
+       // result: (SHRWconst [c&31] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386MODLU)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(Op386SHRWconst)
+               v.AuxInt = c & 31
+               v.AddArg(x)
+               return true
+       }
+       // match: (SHRW x (MOVLconst [c]))
+       // cond:
+       // result: (SHRWconst [c&31] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(Op386SHRWconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
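
Aside: the SHLL/SHRB/SHRL/SHRW rules above replace a shift by a constant with the immediate form, masking the count with &31, and drop an explicit ANDLconst [31] on a variable count. Both are safe because 386 32-bit shift instructions only consult the low five bits of CL. A small sketch (Go shifts do not wrap, so the mask is applied explicitly here):

    package main

    import "fmt"

    // shll models (SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x).
    func shll(x uint32, c int64) uint32 {
        return x << (uint(c) & 31)
    }

    func main() {
        fmt.Println(shll(1, 3))  // 8
        fmt.Println(shll(1, 35)) // also 8, since 35&31 == 3
    }
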
-func rewriteValue386_OpMod8(v *Value, config *Config) bool {
+func rewriteValue386_Op386SUBL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod8   x y)
+       // match: (SUBL x (MOVLconst [c]))
        // cond:
-       // result: (MODW  (SignExt8to16 x) (SignExt8to16 y))
+       // result: (SUBLconst x [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386MODW)
-               v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(Op386SUBLconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (SUBL (MOVLconst [c]) x)
+       // cond:
+       // result: (NEGL (SUBLconst <v.Type> x [c]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386MOVLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(Op386NEGL)
+               v0 := b.NewValue0(v.Line, Op386SUBLconst, v.Type)
+               v0.AuxInt = c
                v0.AddArg(x)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-               v1.AddArg(y)
-               v.AddArg(v1)
                return true
        }
+       // match: (SUBL x x)
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               x := v.Args[0]
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       return false
 }
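
Aside: of the SUBL rules, the constant-on-the-left case is the interesting one. The immediate form of SUB can only subtract the constant from the register, so c-x is rewritten as -(x-c) via NEGL, which is the same value in wrapping 32-bit arithmetic. A sketch of the identity (subFromConst is an illustrative name):

    package main

    import "fmt"

    // subFromConst models (SUBL (MOVLconst [c]) x) ->
    // (NEGL (SUBLconst <v.Type> x [c])): compute x-c, then negate.
    func subFromConst(c, x int32) int32 {
        return -(x - c)
    }

    func main() {
        fmt.Println(subFromConst(10, 3)) // 7
        fmt.Println(subFromConst(3, 10)) // -7
    }
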
-func rewriteValue386_OpMod8u(v *Value, config *Config) bool {
+func rewriteValue386_Op386SUBLcarry(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod8u  x y)
+       // match: (SUBLcarry x (MOVLconst [c]))
        // cond:
-       // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+       // result: (SUBLconstcarry [c] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386MODWU)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(Op386SUBLconstcarry)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
+       return false
 }
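
Aside: the removed Mod8/Mod8u lowerings widen byte operands before dividing, since the rules only target MODW/MODWU here: the signed form sign-extends to 16 bits (SignExt8to16) and the unsigned form zero-extends (ZeroExt8to16). Roughly, in Go terms (mod8 is an illustrative name):

    package main

    import "fmt"

    // mod8 mirrors (Mod8 x y) -> (MODW (SignExt8to16 x) (SignExt8to16 y)):
    // widen both operands, take the 16-bit remainder, narrow the result.
    func mod8(x, y int8) int8 {
        return int8(int16(x) % int16(y))
    }

    func main() {
        fmt.Println(mod8(-7, 3)) // -1: the remainder keeps the dividend's sign
    }
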
-func rewriteValue386_OpMove(v *Value, config *Config) bool {
+func rewriteValue386_Op386SUBLconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Move [s] _ _ mem)
-       // cond: SizeAndAlign(s).Size() == 0
-       // result: mem
+       // match: (SUBLconst [c] x)
+       // cond: int32(c) == 0
+       // result: x
        for {
-               s := v.AuxInt
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 0) {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(int32(c) == 0) {
                        break
                }
                v.reset(OpCopy)
-               v.Type = mem.Type
-               v.AddArg(mem)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 1
-       // result: (MOVBstore dst (MOVBload src mem) mem)
-       for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 1) {
-                       break
-               }
-               v.reset(Op386MOVBstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+       // match: (SUBLconst [c] x)
+       // cond:
+       // result: (ADDLconst [int64(int32(-c))] x)
+       for {
+               c := v.AuxInt
+               x := v.Args[0]
+               v.reset(Op386ADDLconst)
+               v.AuxInt = int64(int32(-c))
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 2
-       // result: (MOVWstore dst (MOVWload src mem) mem)
+}
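
Aside: SUBLconst folds away entirely when the low 32 bits of the constant are zero, and otherwise becomes an ADDLconst of the negated constant. The int64(int32(-c)) dance keeps the AuxInt canonical: negate, truncate to 32 bits, sign-extend back. A sketch (negAux is an illustrative name):

    package main

    import "fmt"

    // negAux models the AuxInt arithmetic in
    // (SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x).
    func negAux(c int64) int64 {
        return int64(int32(-c))
    }

    func main() {
        fmt.Println(negAux(5))        // -5
        fmt.Println(negAux(-1 << 31)) // -2147483648: negating MinInt32 wraps to itself
    }
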
+func rewriteValue386_Op386XORL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (XORL x (MOVLconst [c]))
+       // cond:
+       // result: (XORLconst [c] x)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 2) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386MOVWstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(Op386XORLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4
-       // result: (MOVLstore dst (MOVLload src mem) mem)
+       // match: (XORL (MOVLconst [c]) x)
+       // cond:
+       // result: (XORLconst [c] x)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 4) {
+               v_0 := v.Args[0]
+               if v_0.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386MOVLstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(Op386XORLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 3
-       // result: (MOVBstore [2] dst (MOVBload [2] src mem)            (MOVWstore dst (MOVWload src mem) mem))
+       // match: (XORL x x)
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 3) {
+               x := v.Args[0]
+               if x != v.Args[1] {
                        break
                }
-               v.reset(Op386MOVBstore)
-               v.AuxInt = 2
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
-               v0.AuxInt = 2
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386MOVWstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 5
-       // result: (MOVBstore [4] dst (MOVBload [4] src mem)            (MOVLstore dst (MOVLload src mem) mem))
+       return false
+}
+func rewriteValue386_Op386XORLconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (XORLconst [c] (XORLconst [d] x))
+       // cond:
+       // result: (XORLconst [c ^ d] x)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 5) {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != Op386XORLconst {
                        break
                }
-               v.reset(Op386MOVBstore)
-               v.AuxInt = 4
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
-               v0.AuxInt = 4
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(Op386XORLconst)
+               v.AuxInt = c ^ d
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 6
-       // result: (MOVWstore [4] dst (MOVWload [4] src mem)            (MOVLstore dst (MOVLload src mem) mem))
+       // match: (XORLconst [c] x)
+       // cond: int32(c)==0
+       // result: x
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 6) {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(int32(c) == 0) {
                        break
                }
-               v.reset(Op386MOVWstore)
-               v.AuxInt = 4
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
-               v0.AuxInt = 4
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 7
-       // result: (MOVLstore [3] dst (MOVLload [3] src mem)            (MOVLstore dst (MOVLload src mem) mem))
+       // match: (XORLconst [c] (MOVLconst [d]))
+       // cond:
+       // result: (MOVLconst [c^d])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 7) {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != Op386MOVLconst {
                        break
                }
-               v.reset(Op386MOVLstore)
-               v.AuxInt = 3
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-               v0.AuxInt = 3
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               d := v_0.AuxInt
+               v.reset(Op386MOVLconst)
+               v.AuxInt = c ^ d
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 8
-       // result: (MOVLstore [4] dst (MOVLload [4] src mem)            (MOVLstore dst (MOVLload src mem) mem))
+       return false
+}
+func rewriteValue386_OpAdd16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add16  x y)
+       // cond:
+       // result: (ADDL  x y)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 8) {
-                       break
-               }
-               v.reset(Op386MOVLstore)
-               v.AuxInt = 4
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-               v0.AuxInt = 4
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ADDL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0
-       // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4]              (ADDLconst <dst.Type> dst [SizeAndAlign(s).Size()%4])           (ADDLconst <src.Type> src [SizeAndAlign(s).Size()%4])           (MOVLstore dst (MOVLload src mem) mem))
-       for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0) {
-                       break
-               }
-               v.reset(OpMove)
-               v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
-               v0 := b.NewValue0(v.Line, Op386ADDLconst, dst.Type)
-               v0.AddArg(dst)
-               v0.AuxInt = SizeAndAlign(s).Size() % 4
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386ADDLconst, src.Type)
-               v1.AddArg(src)
-               v1.AuxInt = SizeAndAlign(s).Size() % 4
-               v.AddArg(v1)
-               v2 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
-               v2.AddArg(dst)
-               v3 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-               v3.AddArg(src)
-               v3.AddArg(mem)
-               v2.AddArg(v3)
-               v2.AddArg(mem)
-               v.AddArg(v2)
-               return true
-       }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0         && !config.noDuffDevice
-       // result: (DUFFCOPY [10*(128-SizeAndAlign(s).Size()/4)] dst src mem)
-       for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice) {
-                       break
-               }
-               v.reset(Op386DUFFCOPY)
-               v.AuxInt = 10 * (128 - SizeAndAlign(s).Size()/4)
-               v.AddArg(dst)
-               v.AddArg(src)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Move [s] dst src mem)
-       // cond: (SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0
-       // result: (REPMOVSL dst src (MOVLconst [SizeAndAlign(s).Size()/4]) mem)
-       for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !((SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0) {
-                       break
-               }
-               v.reset(Op386REPMOVSL)
-               v.AddArg(dst)
-               v.AddArg(src)
-               v0 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
-               v0.AuxInt = SizeAndAlign(s).Size() / 4
-               v.AddArg(v0)
-               v.AddArg(mem)
-               return true
-       }
-       return false
 }
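
Aside: the removed Move lowering dispatches on copy size: up to 8 bytes it emits inline load/store pairs, unaligned tails are peeled off and the 4-aligned rest recursed on, 4-aligned copies up to 4*128 bytes use Duff's device, and anything larger (or any size when Duff's device is disabled) uses REP MOVSL. A sketch of just the dispatch, where noDuffDevice stands in for config.noDuffDevice:

    package main

    import "fmt"

    // moveStrategy sketches the size dispatch in the Move rules; the
    // thresholds come straight from the conditions above.
    func moveStrategy(size int64, noDuffDevice bool) string {
        switch {
        case size == 0:
            return "no code, reuse mem"
        case size <= 8:
            return "one or two inline load/store pairs"
        case size%4 != 0:
            return "copy the unaligned tail, then Move the 4-aligned rest"
        case size <= 4*128 && !noDuffDevice:
            return fmt.Sprintf("DUFFCOPY [%d]", 10*(128-size/4))
        default: // size > 4*128, or Duff's device disabled
            return fmt.Sprintf("REPMOVSL with count %d", size/4)
        }
    }

    func main() {
        fmt.Println(moveStrategy(7, false))    // inline pairs
        fmt.Println(moveStrategy(64, false))   // DUFFCOPY [1120]
        fmt.Println(moveStrategy(4096, false)) // REPMOVSL with count 1024
    }
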
-func rewriteValue386_OpMul16(v *Value, config *Config) bool {
+func rewriteValue386_OpAdd32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul16  x y)
+       // match: (Add32  x y)
        // cond:
-       // result: (MULL  x y)
+       // result: (ADDL  x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386MULL)
+               v.reset(Op386ADDL)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
 }
-func rewriteValue386_OpMul32(v *Value, config *Config) bool {
+func rewriteValue386_OpAdd32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul32  x y)
+       // match: (Add32F x y)
        // cond:
-       // result: (MULL  x y)
+       // result: (ADDSS x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386MULL)
+               v.reset(Op386ADDSS)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
 }
-func rewriteValue386_OpMul32F(v *Value, config *Config) bool {
+func rewriteValue386_OpAdd32carry(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul32F x y)
+       // match: (Add32carry x y)
        // cond:
-       // result: (MULSS x y)
+       // result: (ADDLcarry x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386MULSS)
+               v.reset(Op386ADDLcarry)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
 }
-func rewriteValue386_OpMul32uhilo(v *Value, config *Config) bool {
+func rewriteValue386_OpAdd32withcarry(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul32uhilo x y)
+       // match: (Add32withcarry x y c)
        // cond:
-       // result: (MULLQU x y)
+       // result: (ADCL x y c)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386MULLQU)
+               c := v.Args[2]
+               v.reset(Op386ADCL)
                v.AddArg(x)
                v.AddArg(y)
+               v.AddArg(c)
                return true
        }
 }
-func rewriteValue386_OpMul64F(v *Value, config *Config) bool {
+func rewriteValue386_OpAdd64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul64F x y)
+       // match: (Add64F x y)
        // cond:
-       // result: (MULSD x y)
+       // result: (ADDSD x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386MULSD)
+               v.reset(Op386ADDSD)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
 }
-func rewriteValue386_OpMul8(v *Value, config *Config) bool {
+func rewriteValue386_OpAdd8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul8   x y)
+       // match: (Add8   x y)
        // cond:
-       // result: (MULL  x y)
+       // result: (ADDL  x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386MULL)
+               v.reset(Op386ADDL)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
 }
-func rewriteValue386_Op386NEGL(v *Value, config *Config) bool {
+func rewriteValue386_OpAddPtr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NEGL (MOVLconst [c]))
+       // match: (AddPtr x y)
        // cond:
-       // result: (MOVLconst [int64(int32(-c))])
+       // result: (ADDL  x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               v.reset(Op386MOVLconst)
-               v.AuxInt = int64(int32(-c))
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ADDL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386NOTL(v *Value, config *Config) bool {
+func rewriteValue386_OpAddr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NOTL (MOVLconst [c]))
+       // match: (Addr {sym} base)
        // cond:
-       // result: (MOVLconst [^c])
+       // result: (LEAL {sym} base)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               v.reset(Op386MOVLconst)
-               v.AuxInt = ^c
+               sym := v.Aux
+               base := v.Args[0]
+               v.reset(Op386LEAL)
+               v.Aux = sym
+               v.AddArg(base)
                return true
        }
-       return false
 }
-func rewriteValue386_OpNeg16(v *Value, config *Config) bool {
+func rewriteValue386_OpAnd16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg16  x)
+       // match: (And16 x y)
        // cond:
-       // result: (NEGL x)
+       // result: (ANDL x y)
        for {
                x := v.Args[0]
-               v.reset(Op386NEGL)
+               y := v.Args[1]
+               v.reset(Op386ANDL)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValue386_OpNeg32(v *Value, config *Config) bool {
+func rewriteValue386_OpAnd32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg32  x)
+       // match: (And32 x y)
        // cond:
-       // result: (NEGL x)
+       // result: (ANDL x y)
        for {
                x := v.Args[0]
-               v.reset(Op386NEGL)
+               y := v.Args[1]
+               v.reset(Op386ANDL)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValue386_OpNeg32F(v *Value, config *Config) bool {
+func rewriteValue386_OpAnd8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg32F x)
+       // match: (And8  x y)
        // cond:
-       // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
+       // result: (ANDL x y)
        for {
                x := v.Args[0]
-               v.reset(Op386PXOR)
+               y := v.Args[1]
+               v.reset(Op386ANDL)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386MOVSSconst, config.Frontend().TypeFloat32())
-               v0.AuxInt = f2i(math.Copysign(0, -1))
-               v.AddArg(v0)
+               v.AddArg(y)
                return true
        }
 }
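
Aside: the removed Neg32F/Neg64F lowerings implement float negation as PXOR with a constant whose only set bit is the sign bit, f2i(math.Copysign(0, -1)). A sketch of why that works (negateBySignBit is an illustrative name):

    package main

    import (
        "fmt"
        "math"
    )

    // negateBySignBit shows the idea behind (Neg32F x) ->
    // (PXOR x (MOVSSconst [f2i(math.Copysign(0, -1))])): the bit pattern
    // of -0.0 has only the sign bit set, so XORing with it flips the
    // sign and nothing else, a branch-free IEEE 754 negation.
    func negateBySignBit(x float32) float32 {
        signBit := math.Float32bits(float32(math.Copysign(0, -1))) // 0x80000000
        return math.Float32frombits(math.Float32bits(x) ^ signBit)
    }

    func main() {
        fmt.Println(negateBySignBit(1.5)) // -1.5
        fmt.Println(negateBySignBit(-2))  // 2
    }
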
-func rewriteValue386_OpNeg64F(v *Value, config *Config) bool {
+func rewriteValue386_OpAndB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg64F x)
+       // match: (AndB x y)
        // cond:
-       // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+       // result: (ANDL x y)
        for {
                x := v.Args[0]
-               v.reset(Op386PXOR)
+               y := v.Args[1]
+               v.reset(Op386ANDL)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386MOVSDconst, config.Frontend().TypeFloat64())
-               v0.AuxInt = f2i(math.Copysign(0, -1))
-               v.AddArg(v0)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValue386_OpNeg8(v *Value, config *Config) bool {
+func rewriteValue386_OpBswap32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg8   x)
+       // match: (Bswap32 x)
        // cond:
-       // result: (NEGL x)
+       // result: (BSWAPL x)
        for {
                x := v.Args[0]
-               v.reset(Op386NEGL)
+               v.reset(Op386BSWAPL)
                v.AddArg(x)
                return true
        }
 }
-func rewriteValue386_OpNeq16(v *Value, config *Config) bool {
+func rewriteValue386_OpClosureCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq16  x y)
+       // match: (ClosureCall [argwid] entry closure mem)
        // cond:
-       // result: (SETNE (CMPW x y))
+       // result: (CALLclosure [argwid] entry closure mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETNE)
-               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               argwid := v.AuxInt
+               entry := v.Args[0]
+               closure := v.Args[1]
+               mem := v.Args[2]
+               v.reset(Op386CALLclosure)
+               v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(closure)
+               v.AddArg(mem)
                return true
        }
 }
-func rewriteValue386_OpNeq32(v *Value, config *Config) bool {
+func rewriteValue386_OpCom16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq32  x y)
+       // match: (Com16 x)
        // cond:
-       // result: (SETNE (CMPL x y))
+       // result: (NOTL x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETNE)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(Op386NOTL)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValue386_OpNeq32F(v *Value, config *Config) bool {
+func rewriteValue386_OpCom32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq32F x y)
+       // match: (Com32 x)
        // cond:
-       // result: (SETNEF (UCOMISS x y))
+       // result: (NOTL x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETNEF)
-               v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(Op386NOTL)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValue386_OpNeq64F(v *Value, config *Config) bool {
+func rewriteValue386_OpCom8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq64F x y)
+       // match: (Com8  x)
        // cond:
-       // result: (SETNEF (UCOMISD x y))
+       // result: (NOTL x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETNEF)
-               v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(Op386NOTL)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValue386_OpNeq8(v *Value, config *Config) bool {
+func rewriteValue386_OpConst16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq8   x y)
+       // match: (Const16  [val])
        // cond:
-       // result: (SETNE (CMPB x y))
+       // result: (MOVLconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETNE)
-               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               val := v.AuxInt
+               v.reset(Op386MOVLconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValue386_OpNeqB(v *Value, config *Config) bool {
+func rewriteValue386_OpConst32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NeqB   x y)
+       // match: (Const32  [val])
        // cond:
-       // result: (SETNE (CMPB x y))
+       // result: (MOVLconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETNE)
-               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               val := v.AuxInt
+               v.reset(Op386MOVLconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValue386_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValue386_OpConst32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NeqPtr x y)
+       // match: (Const32F [val])
        // cond:
-       // result: (SETNE (CMPL x y))
+       // result: (MOVSSconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SETNE)
-               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               val := v.AuxInt
+               v.reset(Op386MOVSSconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValue386_OpNilCheck(v *Value, config *Config) bool {
+func rewriteValue386_OpConst64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NilCheck ptr mem)
+       // match: (Const64F [val])
        // cond:
-       // result: (LoweredNilCheck ptr mem)
+       // result: (MOVSDconst [val])
        for {
-               ptr := v.Args[0]
+               val := v.AuxInt
+               v.reset(Op386MOVSDconst)
+               v.AuxInt = val
+               return true
+       }
+}
+func rewriteValue386_OpConst8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Const8   [val])
+       // cond:
+       // result: (MOVLconst [val])
+       for {
+               val := v.AuxInt
+               v.reset(Op386MOVLconst)
+               v.AuxInt = val
+               return true
+       }
+}
+func rewriteValue386_OpConstBool(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ConstBool [b])
+       // cond:
+       // result: (MOVLconst [b])
+       for {
+               b := v.AuxInt
+               v.reset(Op386MOVLconst)
+               v.AuxInt = b
+               return true
+       }
+}
+func rewriteValue386_OpConstNil(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ConstNil)
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v.reset(Op386MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+}
+func rewriteValue386_OpConvert(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Convert <t> x mem)
+       // cond:
+       // result: (MOVLconvert <t> x mem)
+       for {
+               t := v.Type
+               x := v.Args[0]
                mem := v.Args[1]
-               v.reset(Op386LoweredNilCheck)
-               v.AddArg(ptr)
+               v.reset(Op386MOVLconvert)
+               v.Type = t
+               v.AddArg(x)
                v.AddArg(mem)
                return true
        }
 }
-func rewriteValue386_OpNot(v *Value, config *Config) bool {
+func rewriteValue386_OpCvt32Fto32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Not x)
+       // match: (Cvt32Fto32 x)
        // cond:
-       // result: (XORLconst [1] x)
+       // result: (CVTTSS2SL x)
        for {
                x := v.Args[0]
-               v.reset(Op386XORLconst)
-               v.AuxInt = 1
+               v.reset(Op386CVTTSS2SL)
                v.AddArg(x)
                return true
        }
 }
-func rewriteValue386_Op386ORL(v *Value, config *Config) bool {
+func rewriteValue386_OpCvt32Fto64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ORL x (MOVLconst [c]))
+       // match: (Cvt32Fto64F x)
        // cond:
-       // result: (ORLconst [c] x)
+       // result: (CVTSS2SD x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(Op386ORLconst)
-               v.AuxInt = c
+               v.reset(Op386CVTSS2SD)
                v.AddArg(x)
                return true
        }
-       // match: (ORL (MOVLconst [c]) x)
+}
+func rewriteValue386_OpCvt32to32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32to32F x)
        // cond:
-       // result: (ORLconst [c] x)
+       // result: (CVTSL2SS x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(Op386ORLconst)
-               v.AuxInt = c
+               x := v.Args[0]
+               v.reset(Op386CVTSL2SS)
                v.AddArg(x)
                return true
        }
-       // match: (ORL x x)
+}
+func rewriteValue386_OpCvt32to64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32to64F x)
        // cond:
-       // result: x
+       // result: (CVTSL2SD x)
        for {
                x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               v.reset(Op386CVTSL2SD)
                v.AddArg(x)
                return true
        }
-       // match: (ORL                  x0:(MOVBload [i]   {s} p mem)     s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
-       // cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
-       // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
+}
+func rewriteValue386_OpCvt64Fto32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt64Fto32 x)
+       // cond:
+       // result: (CVTTSD2SL x)
        for {
-               x0 := v.Args[0]
-               if x0.Op != Op386MOVBload {
-                       break
-               }
-               i := x0.AuxInt
-               s := x0.Aux
-               p := x0.Args[0]
-               mem := x0.Args[1]
-               s0 := v.Args[1]
-               if s0.Op != Op386SHLLconst {
-                       break
-               }
-               if s0.AuxInt != 8 {
-                       break
-               }
-               x1 := s0.Args[0]
-               if x1.Op != Op386MOVBload {
-                       break
-               }
-               if x1.AuxInt != i+1 {
-                       break
-               }
-               if x1.Aux != s {
-                       break
-               }
-               if p != x1.Args[0] {
-                       break
-               }
-               if mem != x1.Args[1] {
-                       break
-               }
-               if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
-                       break
-               }
-               b = mergePoint(b, x0, x1)
-               v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = i
-               v0.Aux = s
-               v0.AddArg(p)
-               v0.AddArg(mem)
+               x := v.Args[0]
+               v.reset(Op386CVTTSD2SL)
+               v.AddArg(x)
                return true
        }
-       // match: (ORL o0:(ORL o1:(ORL                        x0:(MOVBload [i]   {s} p mem)     s0:(SHLLconst [8]  x1:(MOVBload [i+1] {s} p mem)))     s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem)))     s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem)))
-       // cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && mergePoint(b,x0,x1,x2,x3) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(o0)   && clobber(o1)
-       // result: @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem)
+}
+func rewriteValue386_OpCvt64Fto32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt64Fto32F x)
+       // cond:
+       // result: (CVTSD2SS x)
        for {
-               o0 := v.Args[0]
-               if o0.Op != Op386ORL {
-                       break
-               }
-               o1 := o0.Args[0]
-               if o1.Op != Op386ORL {
-                       break
-               }
-               x0 := o1.Args[0]
-               if x0.Op != Op386MOVBload {
-                       break
-               }
-               i := x0.AuxInt
-               s := x0.Aux
-               p := x0.Args[0]
-               mem := x0.Args[1]
-               s0 := o1.Args[1]
-               if s0.Op != Op386SHLLconst {
-                       break
-               }
-               if s0.AuxInt != 8 {
-                       break
-               }
-               x1 := s0.Args[0]
-               if x1.Op != Op386MOVBload {
-                       break
-               }
-               if x1.AuxInt != i+1 {
-                       break
-               }
-               if x1.Aux != s {
-                       break
-               }
-               if p != x1.Args[0] {
-                       break
-               }
-               if mem != x1.Args[1] {
-                       break
-               }
-               s1 := o0.Args[1]
-               if s1.Op != Op386SHLLconst {
-                       break
-               }
-               if s1.AuxInt != 16 {
-                       break
-               }
-               x2 := s1.Args[0]
-               if x2.Op != Op386MOVBload {
-                       break
-               }
-               if x2.AuxInt != i+2 {
-                       break
-               }
-               if x2.Aux != s {
-                       break
-               }
-               if p != x2.Args[0] {
-                       break
-               }
-               if mem != x2.Args[1] {
-                       break
-               }
-               s2 := v.Args[1]
-               if s2.Op != Op386SHLLconst {
-                       break
-               }
-               if s2.AuxInt != 24 {
-                       break
-               }
-               x3 := s2.Args[0]
-               if x3.Op != Op386MOVBload {
-                       break
-               }
-               if x3.AuxInt != i+3 {
-                       break
-               }
-               if x3.Aux != s {
-                       break
-               }
-               if p != x3.Args[0] {
-                       break
-               }
-               if mem != x3.Args[1] {
-                       break
-               }
-               if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
-                       break
-               }
-               b = mergePoint(b, x0, x1, x2, x3)
-               v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-               v.reset(OpCopy)
+               x := v.Args[0]
+               v.reset(Op386CVTSD2SS)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValue386_OpDeferCall(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (DeferCall [argwid] mem)
+       // cond:
+       // result: (CALLdefer [argwid] mem)
+       for {
+               argwid := v.AuxInt
+               mem := v.Args[0]
+               v.reset(Op386CALLdefer)
+               v.AuxInt = argwid
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValue386_OpDiv16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div16  x y)
+       // cond:
+       // result: (DIVW  x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386DIVW)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValue386_OpDiv16u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div16u x y)
+       // cond:
+       // result: (DIVWU x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386DIVWU)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValue386_OpDiv32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div32  x y)
+       // cond:
+       // result: (DIVL  x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386DIVL)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValue386_OpDiv32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div32F x y)
+       // cond:
+       // result: (DIVSS x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386DIVSS)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValue386_OpDiv32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div32u x y)
+       // cond:
+       // result: (DIVLU x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386DIVLU)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValue386_OpDiv64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div64F x y)
+       // cond:
+       // result: (DIVSD x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386DIVSD)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValue386_OpDiv8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div8   x y)
+       // cond:
+       // result: (DIVW  (SignExt8to16 x) (SignExt8to16 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386DIVW)
+               v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValue386_OpDiv8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div8u  x y)
+       // cond:
+       // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386DIVWU)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValue386_OpEq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq16  x y)
+       // cond:
+       // result: (SETEQ (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETEQ)
+               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpEq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq32  x y)
+       // cond:
+       // result: (SETEQ (CMPL x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETEQ)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpEq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq32F x y)
+       // cond:
+       // result: (SETEQF (UCOMISS x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETEQF)
+               v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpEq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq64F x y)
+       // cond:
+       // result: (SETEQF (UCOMISD x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETEQF)
+               v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpEq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq8   x y)
+       // cond:
+       // result: (SETEQ (CMPB x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETEQ)
+               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpEqB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (EqB   x y)
+       // cond:
+       // result: (SETEQ (CMPB x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETEQ)
+               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpEqPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (EqPtr x y)
+       // cond:
+       // result: (SETEQ (CMPL x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETEQ)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpGeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq16  x y)
+       // cond:
+       // result: (SETGE (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETGE)
+               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpGeq16U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq16U x y)
+       // cond:
+       // result: (SETAE (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETAE)
+               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpGeq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq32  x y)
+       // cond:
+       // result: (SETGE (CMPL x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETGE)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpGeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq32F x y)
+       // cond:
+       // result: (SETGEF (UCOMISS x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETGEF)
+               v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpGeq32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq32U x y)
+       // cond:
+       // result: (SETAE (CMPL x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETAE)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpGeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq64F x y)
+       // cond:
+       // result: (SETGEF (UCOMISD x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETGEF)
+               v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v0.AuxInt = i
-               v0.Aux = s
-               v0.AddArg(p)
-               v0.AddArg(mem)
                return true
        }
-       // match: (ORL                  x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
-       // cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
-       // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
+}
+func rewriteValue386_OpGeq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq8   x y)
+       // cond:
+       // result: (SETGE (CMPB x y))
        for {
-               x0 := v.Args[0]
-               if x0.Op != Op386MOVBloadidx1 {
-                       break
-               }
-               i := x0.AuxInt
-               s := x0.Aux
-               p := x0.Args[0]
-               idx := x0.Args[1]
-               mem := x0.Args[2]
-               s0 := v.Args[1]
-               if s0.Op != Op386SHLLconst {
-                       break
-               }
-               if s0.AuxInt != 8 {
-                       break
-               }
-               x1 := s0.Args[0]
-               if x1.Op != Op386MOVBloadidx1 {
-                       break
-               }
-               if x1.AuxInt != i+1 {
-                       break
-               }
-               if x1.Aux != s {
-                       break
-               }
-               if p != x1.Args[0] {
-                       break
-               }
-               if idx != x1.Args[1] {
-                       break
-               }
-               if mem != x1.Args[2] {
-                       break
-               }
-               if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
-                       break
-               }
-               b = mergePoint(b, x0, x1)
-               v0 := b.NewValue0(v.Line, Op386MOVWloadidx1, v.Type)
-               v.reset(OpCopy)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETGE)
+               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v0.AuxInt = i
-               v0.Aux = s
-               v0.AddArg(p)
-               v0.AddArg(idx)
-               v0.AddArg(mem)
                return true
        }
-       // match: (ORL o0:(ORL o1:(ORL                        x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [8]  x1:(MOVBloadidx1 [i+1] {s} p idx mem)))     s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))     s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
-       // cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && mergePoint(b,x0,x1,x2,x3) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(o0)   && clobber(o1)
-       // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
+}
+func rewriteValue386_OpGeq8U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq8U  x y)
+       // cond:
+       // result: (SETAE (CMPB x y))
        for {
-               o0 := v.Args[0]
-               if o0.Op != Op386ORL {
-                       break
-               }
-               o1 := o0.Args[0]
-               if o1.Op != Op386ORL {
-                       break
-               }
-               x0 := o1.Args[0]
-               if x0.Op != Op386MOVBloadidx1 {
-                       break
-               }
-               i := x0.AuxInt
-               s := x0.Aux
-               p := x0.Args[0]
-               idx := x0.Args[1]
-               mem := x0.Args[2]
-               s0 := o1.Args[1]
-               if s0.Op != Op386SHLLconst {
-                       break
-               }
-               if s0.AuxInt != 8 {
-                       break
-               }
-               x1 := s0.Args[0]
-               if x1.Op != Op386MOVBloadidx1 {
-                       break
-               }
-               if x1.AuxInt != i+1 {
-                       break
-               }
-               if x1.Aux != s {
-                       break
-               }
-               if p != x1.Args[0] {
-                       break
-               }
-               if idx != x1.Args[1] {
-                       break
-               }
-               if mem != x1.Args[2] {
-                       break
-               }
-               s1 := o0.Args[1]
-               if s1.Op != Op386SHLLconst {
-                       break
-               }
-               if s1.AuxInt != 16 {
-                       break
-               }
-               x2 := s1.Args[0]
-               if x2.Op != Op386MOVBloadidx1 {
-                       break
-               }
-               if x2.AuxInt != i+2 {
-                       break
-               }
-               if x2.Aux != s {
-                       break
-               }
-               if p != x2.Args[0] {
-                       break
-               }
-               if idx != x2.Args[1] {
-                       break
-               }
-               if mem != x2.Args[2] {
-                       break
-               }
-               s2 := v.Args[1]
-               if s2.Op != Op386SHLLconst {
-                       break
-               }
-               if s2.AuxInt != 24 {
-                       break
-               }
-               x3 := s2.Args[0]
-               if x3.Op != Op386MOVBloadidx1 {
-                       break
-               }
-               if x3.AuxInt != i+3 {
-                       break
-               }
-               if x3.Aux != s {
-                       break
-               }
-               if p != x3.Args[0] {
-                       break
-               }
-               if idx != x3.Args[1] {
-                       break
-               }
-               if mem != x3.Args[2] {
-                       break
-               }
-               if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
-                       break
-               }
-               b = mergePoint(b, x0, x1, x2, x3)
-               v0 := b.NewValue0(v.Line, Op386MOVLloadidx1, v.Type)
-               v.reset(OpCopy)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETAE)
+               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpGetClosurePtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GetClosurePtr)
+       // cond:
+       // result: (LoweredGetClosurePtr)
+       for {
+               v.reset(Op386LoweredGetClosurePtr)
+               return true
+       }
+}
+func rewriteValue386_OpGetG(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GetG mem)
+       // cond:
+       // result: (LoweredGetG mem)
+       for {
+               mem := v.Args[0]
+               v.reset(Op386LoweredGetG)
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValue386_OpGoCall(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GoCall [argwid] mem)
+       // cond:
+       // result: (CALLgo [argwid] mem)
+       for {
+               argwid := v.AuxInt
+               mem := v.Args[0]
+               v.reset(Op386CALLgo)
+               v.AuxInt = argwid
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValue386_OpGreater16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater16  x y)
+       // cond:
+       // result: (SETG (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETG)
+               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpGreater16U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater16U x y)
+       // cond:
+       // result: (SETA (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETA)
+               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v0.AuxInt = i
-               v0.Aux = s
-               v0.AddArg(p)
-               v0.AddArg(idx)
-               v0.AddArg(mem)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386ORLconst(v *Value, config *Config) bool {
+func rewriteValue386_OpGreater32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ORLconst [c] x)
-       // cond: int32(c)==0
-       // result: x
+       // match: (Greater32  x y)
+       // cond:
+       // result: (SETG (CMPL x y))
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               if !(int32(c) == 0) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(Op386SETG)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (ORLconst [c] _)
-       // cond: int32(c)==-1
-       // result: (MOVLconst [-1])
+}
+func rewriteValue386_OpGreater32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater32F x y)
+       // cond:
+       // result: (SETGF (UCOMISS x y))
        for {
-               c := v.AuxInt
-               if !(int32(c) == -1) {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = -1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETGF)
+               v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (ORLconst [c] (MOVLconst [d]))
+}
+func rewriteValue386_OpGreater32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater32U x y)
        // cond:
-       // result: (MOVLconst [c|d])
+       // result: (SETA (CMPL x y))
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(Op386MOVLconst)
-               v.AuxInt = c | d
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETA)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValue386_OpOffPtr(v *Value, config *Config) bool {
+func rewriteValue386_OpGreater64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (OffPtr [off] ptr)
+       // match: (Greater64F x y)
        // cond:
-       // result: (ADDLconst [off] ptr)
+       // result: (SETGF (UCOMISD x y))
        for {
-               off := v.AuxInt
-               ptr := v.Args[0]
-               v.reset(Op386ADDLconst)
-               v.AuxInt = off
-               v.AddArg(ptr)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETGF)
+               v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValue386_OpOr16(v *Value, config *Config) bool {
+func rewriteValue386_OpGreater8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or16 x y)
+       // match: (Greater8   x y)
        // cond:
-       // result: (ORL x y)
+       // result: (SETG (CMPB x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386ORL)
+               v.reset(Op386SETG)
+               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpGreater8U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater8U  x y)
+       // cond:
+       // result: (SETA (CMPB x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETA)
+               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpHmul16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul16  x y)
+       // cond:
+       // result: (HMULW  x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386HMULW)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
 }
-func rewriteValue386_OpOr32(v *Value, config *Config) bool {
+func rewriteValue386_OpHmul16u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or32 x y)
+       // match: (Hmul16u x y)
        // cond:
-       // result: (ORL x y)
+       // result: (HMULWU x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386ORL)
+               v.reset(Op386HMULWU)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
 }
-func rewriteValue386_OpOr8(v *Value, config *Config) bool {
+func rewriteValue386_OpHmul32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or8  x y)
+       // match: (Hmul32  x y)
        // cond:
-       // result: (ORL x y)
+       // result: (HMULL  x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386ORL)
+               v.reset(Op386HMULL)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
 }
-func rewriteValue386_OpOrB(v *Value, config *Config) bool {
+func rewriteValue386_OpHmul32u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (OrB x y)
+       // match: (Hmul32u x y)
        // cond:
-       // result: (ORL x y)
+       // result: (HMULLU x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386ORL)
+               v.reset(Op386HMULLU)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
 }
-func rewriteValue386_Op386ROLBconst(v *Value, config *Config) bool {
+func rewriteValue386_OpHmul8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ROLBconst [c] (ROLBconst [d] x))
+       // match: (Hmul8   x y)
        // cond:
-       // result: (ROLBconst [(c+d)& 7] x)
+       // result: (HMULB  x y)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ROLBconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(Op386ROLBconst)
-               v.AuxInt = (c + d) & 7
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386HMULB)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (ROLBconst [0] x)
+}
+func rewriteValue386_OpHmul8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul8u  x y)
        // cond:
-       // result: x
+       // result: (HMULBU x y)
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
+               y := v.Args[1]
+               v.reset(Op386HMULBU)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386ROLLconst(v *Value, config *Config) bool {
+func rewriteValue386_OpInterCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ROLLconst [c] (ROLLconst [d] x))
+       // match: (InterCall [argwid] entry mem)
        // cond:
-       // result: (ROLLconst [(c+d)&31] x)
+       // result: (CALLinter [argwid] entry mem)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ROLLconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(Op386ROLLconst)
-               v.AuxInt = (c + d) & 31
-               v.AddArg(x)
+               argwid := v.AuxInt
+               entry := v.Args[0]
+               mem := v.Args[1]
+               v.reset(Op386CALLinter)
+               v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(mem)
                return true
        }
-       // match: (ROLLconst [0] x)
+}
+func rewriteValue386_OpIsInBounds(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (IsInBounds idx len)
        // cond:
-       // result: x
+       // result: (SETB (CMPL idx len))
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
-               x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               idx := v.Args[0]
+               len := v.Args[1]
+               v.reset(Op386SETB)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+               v0.AddArg(idx)
+               v0.AddArg(len)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValue386_OpIsNonNil(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (IsNonNil p)
+       // cond:
+       // result: (SETNE (TESTL p p))
+       for {
+               p := v.Args[0]
+               v.reset(Op386SETNE)
+               v0 := b.NewValue0(v.Line, Op386TESTL, TypeFlags)
+               v0.AddArg(p)
+               v0.AddArg(p)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386ROLWconst(v *Value, config *Config) bool {
+func rewriteValue386_OpIsSliceInBounds(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ROLWconst [c] (ROLWconst [d] x))
+       // match: (IsSliceInBounds idx len)
        // cond:
-       // result: (ROLWconst [(c+d)&15] x)
+       // result: (SETBE (CMPL idx len))
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != Op386ROLWconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(Op386ROLWconst)
-               v.AuxInt = (c + d) & 15
-               v.AddArg(x)
+               idx := v.Args[0]
+               len := v.Args[1]
+               v.reset(Op386SETBE)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+               v0.AddArg(idx)
+               v0.AddArg(len)
+               v.AddArg(v0)
                return true
        }
-       // match: (ROLWconst [0] x)
+}
+func rewriteValue386_OpLeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq16  x y)
        // cond:
-       // result: x
+       // result: (SETLE (CMPW x y))
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(Op386SETLE)
+               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValue386_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq16U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux16 <t> x y)
+       // match: (Leq16U x y)
        // cond:
-       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+       // result: (SETBE (CMPW x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHRW, t)
+               v.reset(Op386SETBE)
+               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 16
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValue386_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux32 <t> x y)
+       // match: (Leq32  x y)
        // cond:
-       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+       // result: (SETLE (CMPL x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHRW, t)
+               v.reset(Op386SETLE)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 16
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValue386_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux64 x (Const64 [c]))
-       // cond: uint64(c) < 16
-       // result: (SHRWconst x [c])
+       // match: (Leq32F x y)
+       // cond:
+       // result: (SETGEF (UCOMISS y x))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 16) {
-                       break
-               }
-               v.reset(Op386SHRWconst)
-               v.AddArg(x)
-               v.AuxInt = c
-               return true
-       }
-       // match: (Rsh16Ux64 _ (Const64 [c]))
-       // cond: uint64(c) >= 16
-       // result: (Const16 [0])
-       for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
-                       break
-               }
-               v.reset(OpConst16)
-               v.AuxInt = 0
+               y := v.Args[1]
+               v.reset(Op386SETGEF)
+               v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValue386_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux8  <t> x y)
+       // match: (Leq32U x y)
        // cond:
-       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+       // result: (SETBE (CMPL x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHRW, t)
+               v.reset(Op386SETBE)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 16
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValue386_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x16 <t> x y)
+       // match: (Leq64F x y)
        // cond:
-       // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+       // result: (SETGEF (UCOMISD y x))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386SARW)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v.reset(Op386SETGEF)
+               v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 16
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValue386_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x32 <t> x y)
+       // match: (Leq8   x y)
        // cond:
-       // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+       // result: (SETLE (CMPB x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386SARW)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v.reset(Op386SETLE)
+               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 16
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValue386_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValue386_OpLeq8U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x64 x (Const64 [c]))
-       // cond: uint64(c) < 16
-       // result: (SARWconst x [c])
+       // match: (Leq8U  x y)
+       // cond:
+       // result: (SETBE (CMPB x y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 16) {
-                       break
-               }
-               v.reset(Op386SARWconst)
-               v.AddArg(x)
-               v.AuxInt = c
+               y := v.Args[1]
+               v.reset(Op386SETBE)
+               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (Rsh16x64 x (Const64 [c]))
-       // cond: uint64(c) >= 16
-       // result: (SARWconst x [15])
+}
+func rewriteValue386_OpLess16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less16  x y)
+       // cond:
+       // result: (SETL (CMPW x y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
-                       break
-               }
-               v.reset(Op386SARWconst)
-               v.AddArg(x)
-               v.AuxInt = 15
+               y := v.Args[1]
+               v.reset(Op386SETL)
+               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValue386_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValue386_OpLess16U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x8  <t> x y)
+       // match: (Less16U x y)
        // cond:
-       // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+       // result: (SETB (CMPW x y))
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SARW)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
-               v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 16
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
+               y := v.Args[1]
+               v.reset(Op386SETB)
+               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValue386_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValue386_OpLess32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux16 <t> x y)
+       // match: (Less32  x y)
        // cond:
-       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+       // result: (SETL (CMPL x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHRL, t)
+               v.reset(Op386SETL)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValue386_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValue386_OpLess32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux32 <t> x y)
+       // match: (Less32F x y)
        // cond:
-       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+       // result: (SETGF (UCOMISS y x))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHRL, t)
-               v0.AddArg(x)
+               v.reset(Op386SETGF)
+               v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
                v0.AddArg(y)
+               v0.AddArg(x)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValue386_OpRsh32Ux64(v *Value, config *Config) bool {
+func rewriteValue386_OpLess32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux64 x (Const64 [c]))
-       // cond: uint64(c) < 32
-       // result: (SHRLconst x [c])
+       // match: (Less32U x y)
+       // cond:
+       // result: (SETB (CMPL x y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 32) {
-                       break
-               }
-               v.reset(Op386SHRLconst)
-               v.AddArg(x)
-               v.AuxInt = c
-               return true
-       }
-       // match: (Rsh32Ux64 _ (Const64 [c]))
-       // cond: uint64(c) >= 32
-       // result: (Const32 [0])
-       for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
-                       break
-               }
-               v.reset(OpConst32)
-               v.AuxInt = 0
+               y := v.Args[1]
+               v.reset(Op386SETB)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValue386_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValue386_OpLess64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux8  <t> x y)
+       // match: (Less64F x y)
        // cond:
-       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+       // result: (SETGF (UCOMISD y x))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHRL, t)
-               v0.AddArg(x)
+               v.reset(Op386SETGF)
+               v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
                v0.AddArg(y)
+               v0.AddArg(x)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValue386_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValue386_OpLess8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x16 <t> x y)
+       // match: (Less8   x y)
        // cond:
-       // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+       // result: (SETL (CMPB x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386SARL)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v.reset(Op386SETL)
+               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 32
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValue386_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValue386_OpLess8U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x32 <t> x y)
+       // match: (Less8U  x y)
        // cond:
-       // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+       // result: (SETB (CMPB x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386SARL)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v.reset(Op386SETB)
+               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 32
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValue386_OpRsh32x64(v *Value, config *Config) bool {
+func rewriteValue386_OpLoad(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x64 x (Const64 [c]))
-       // cond: uint64(c) < 32
-       // result: (SARLconst x [c])
+       // match: (Load <t> ptr mem)
+       // cond: (is32BitInt(t) || isPtr(t))
+       // result: (MOVLload ptr mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitInt(t) || isPtr(t)) {
                        break
                }
-               c := v_1.AuxInt
-               if !(uint64(c) < 32) {
+               v.reset(Op386MOVLload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: is16BitInt(t)
+       // result: (MOVWload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is16BitInt(t)) {
                        break
                }
-               v.reset(Op386SARLconst)
-               v.AddArg(x)
-               v.AuxInt = c
+               v.reset(Op386MOVWload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (Rsh32x64 x (Const64 [c]))
-       // cond: uint64(c) >= 32
-       // result: (SARLconst x [31])
+       // match: (Load <t> ptr mem)
+       // cond: (t.IsBoolean() || is8BitInt(t))
+       // result: (MOVBload ptr mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(t.IsBoolean() || is8BitInt(t)) {
                        break
                }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
+               v.reset(Op386MOVBload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: is32BitFloat(t)
+       // result: (MOVSSload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitFloat(t)) {
                        break
                }
-               v.reset(Op386SARLconst)
+               v.reset(Op386MOVSSload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: is64BitFloat(t)
+       // result: (MOVSDload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is64BitFloat(t)) {
+                       break
+               }
+               v.reset(Op386MOVSDload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
+func rewriteValue386_OpLrot16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot16 <t> x [c])
+       // cond:
+       // result: (ROLWconst <t> [c&15] x)
+       for {
+               t := v.Type
+               c := v.AuxInt
+               x := v.Args[0]
+               v.reset(Op386ROLWconst)
+               v.Type = t
+               v.AuxInt = c & 15
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValue386_OpLrot32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot32 <t> x [c])
+       // cond:
+       // result: (ROLLconst <t> [c&31] x)
+       for {
+               t := v.Type
+               c := v.AuxInt
+               x := v.Args[0]
+               v.reset(Op386ROLLconst)
+               v.Type = t
+               v.AuxInt = c & 31
                v.AddArg(x)
-               v.AuxInt = 31
                return true
        }
-       return false
 }
-func rewriteValue386_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValue386_OpLrot8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x8  <t> x y)
+       // match: (Lrot8  <t> x [c])
        // cond:
-       // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+       // result: (ROLBconst <t> [c&7] x)
        for {
                t := v.Type
+               c := v.AuxInt
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SARL)
+               v.reset(Op386ROLBconst)
                v.Type = t
+               v.AuxInt = c & 7
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
-               v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 32
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
-               v.AddArg(v0)
                return true
        }
 }
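
The three Lrot lowerings above are the same shape at different widths: the rotate count is reduced mod the operand width (c&15, c&31, c&7) and the result is a single ROLxconst. A sketch of the 16-bit case's semantics:

    // What ROLWconst [c&15] computes for (Lrot16 <t> x [c]).
    func rotl16(x uint16, c uint) uint16 {
        r := c & 15
        return x<<r | x>>(16-r) // for r == 0, x>>16 is 0 in Go, so x comes back unchanged
    }
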
-func rewriteValue386_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh16x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux16 <t> x y)
+       // match: (Lsh16x16 <t> x y)
        // cond:
-       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
        for {
                t := v.Type
                x := v.Args[0]
                y := v.Args[1]
                v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHRB, t)
+               v0 := b.NewValue0(v.Line, Op386SHLL, t)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
                v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+               v2.AuxInt = 32
                v2.AddArg(y)
-               v2.AuxInt = 8
                v1.AddArg(v2)
                v.AddArg(v1)
                return true
        }
 }
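
The ANDL/SHLL/SBBLcarrymask combination above is the masked-shift idiom used by all the variable-count shift lowerings in this file: SHLL only sees the low 5 bits of the count, so an extra mask zeroes the result once the count is out of range. A sketch of what the generated tree computes, assuming the usual 386 flag behavior (CMPWconst sets carry iff y < 32 unsigned, and SBBLcarrymask turns the carry into 0 or all ones):

    func lsh16x16(x, y uint16) uint16 {
        shifted := uint32(x) << (y & 31) // SHLL: hardware masks the count to 5 bits
        var mask uint32
        if y < 32 { // CMPWconst y [32]
            mask = 1<<32 - 1 // SBBLcarrymask: carry -> all ones
        }
        // ANDL; counts in [16,32) already leave the low 16 bits zero,
        // so only counts >= 32 need the mask.
        return uint16(shifted & mask)
    }
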
-func rewriteValue386_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh16x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux32 <t> x y)
+       // match: (Lsh16x32 <t> x y)
        // cond:
-       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
        for {
                t := v.Type
                x := v.Args[0]
                y := v.Args[1]
                v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHRB, t)
+               v0 := b.NewValue0(v.Line, Op386SHLL, t)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
                v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+               v2.AuxInt = 32
                v2.AddArg(y)
-               v2.AuxInt = 8
                v1.AddArg(v2)
                v.AddArg(v1)
                return true
        }
 }
-func rewriteValue386_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh16x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux64 x (Const64 [c]))
-       // cond: uint64(c) < 8
-       // result: (SHRBconst x [c])
+       // match: (Lsh16x64 x (Const64 [c]))
+       // cond: uint64(c) < 16
+       // result: (SHLLconst x [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -10773,1496 +10616,1749 @@ func rewriteValue386_OpRsh8Ux64(v *Value, config *Config) bool {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) < 8) {
+               if !(uint64(c) < 16) {
                        break
                }
-               v.reset(Op386SHRBconst)
-               v.AddArg(x)
+               v.reset(Op386SHLLconst)
                v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (Rsh8Ux64 _ (Const64 [c]))
-       // cond: uint64(c) >= 8
-       // result: (Const8 [0])
+       // match: (Lsh16x64 _ (Const64 [c]))
+       // cond: uint64(c) >= 16
+       // result: (Const16 [0])
        for {
                v_1 := v.Args[1]
                if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
+               if !(uint64(c) >= 16) {
                        break
                }
-               v.reset(OpConst8)
+               v.reset(OpConst16)
                v.AuxInt = 0
                return true
        }
        return false
 }
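
When the shift count is a known constant, the two rules above skip the masking entirely. A sketch of the case split, using the same bounds as the conds:

    // (Lsh16x64 x (Const64 [c])): one SHLLconst when the shift is in
    // range, otherwise the whole expression folds to the constant 0.
    func lsh16ByConst(x uint16, c uint64) uint16 {
        if c < 16 { // uint64(c) < 16 -> (SHLLconst x [c])
            return x << c
        }
        return 0 // uint64(c) >= 16 -> (Const16 [0])
    }
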
-func rewriteValue386_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh16x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux8  <t> x y)
+       // match: (Lsh16x8  <t> x y)
        // cond:
-       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
        for {
                t := v.Type
                x := v.Args[0]
                y := v.Args[1]
                v.reset(Op386ANDL)
-               v0 := b.NewValue0(v.Line, Op386SHRB, t)
+               v0 := b.NewValue0(v.Line, Op386SHLL, t)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
                v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+               v2.AuxInt = 32
                v2.AddArg(y)
-               v2.AuxInt = 8
                v1.AddArg(v2)
                v.AddArg(v1)
                return true
        }
 }
-func rewriteValue386_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh32x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x16 <t> x y)
+       // match: (Lsh32x16 <t> x y)
        // cond:
-       // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
        for {
                t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386SARB)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHLL, t)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 8
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValue386_OpRsh8x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh8x32 <t> x y)
-       // cond:
-       // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
-       for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(Op386SARB)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
-               v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 8
-               v2.AddArg(v3)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
                v1.AddArg(v2)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValue386_OpRsh8x64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Rsh8x64 x (Const64 [c]))
-       // cond: uint64(c) < 8
-       // result: (SARBconst x [c])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 8) {
-                       break
-               }
-               v.reset(Op386SARBconst)
-               v.AddArg(x)
-               v.AuxInt = c
-               return true
-       }
-       // match: (Rsh8x64 x (Const64 [c]))
-       // cond: uint64(c) >= 8
-       // result: (SARBconst x [7])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
-                       break
-               }
-               v.reset(Op386SARBconst)
-               v.AddArg(x)
-               v.AuxInt = 7
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValue386_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh32x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x8  <t> x y)
+       // match: (Lsh32x32 <t> x y)
        // cond:
-       // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
        for {
                t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(Op386SARB)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHLL, t)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 8
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValue386_Op386SARB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SARB x (MOVLconst [c]))
-       // cond:
-       // result: (SARBconst [c&31] x)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(Op386SARBconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
-               return true
-       }
-       // match: (SARB x (MOVLconst [c]))
-       // cond:
-       // result: (SARBconst [c&31] x)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(Op386SARBconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386SARBconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SARBconst [c] (MOVLconst [d]))
-       // cond:
-       // result: (MOVLconst [d>>uint64(c)])
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(Op386MOVLconst)
-               v.AuxInt = d >> uint64(c)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386SARL(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh32x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SARL x (MOVLconst [c]))
-       // cond:
-       // result: (SARLconst [c&31] x)
+       // match: (Lsh32x64 x (Const64 [c]))
+       // cond: uint64(c) < 32
+       // result: (SHLLconst x [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               v.reset(Op386SARLconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
-               return true
-       }
-       // match: (SARL x (MOVLconst [c]))
-       // cond:
-       // result: (SARLconst [c&31] x)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
+               if !(uint64(c) < 32) {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(Op386SARLconst)
-               v.AuxInt = c & 31
+               v.reset(Op386SHLLconst)
+               v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (SARL x (ANDLconst [31] y))
-       // cond:
-       // result: (SARL x y)
+       // match: (Lsh32x64 _ (Const64 [c]))
+       // cond: uint64(c) >= 32
+       // result: (Const32 [0])
        for {
-               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386ANDLconst {
+               if v_1.Op != OpConst64 {
                        break
                }
-               if v_1.AuxInt != 31 {
+               c := v_1.AuxInt
+               if !(uint64(c) >= 32) {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(Op386SARL)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpConst32)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValue386_Op386SARLconst(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh32x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SARLconst [c] (MOVLconst [d]))
+       // match: (Lsh32x8  <t> x y)
        // cond:
-       // result: (MOVLconst [d>>uint64(c)])
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(Op386MOVLconst)
-               v.AuxInt = d >> uint64(c)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386SARW(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh8x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SARW x (MOVLconst [c]))
-       // cond:
-       // result: (SARWconst [c&31] x)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(Op386SARWconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
-               return true
-       }
-       // match: (SARW x (MOVLconst [c]))
+       // match: (Lsh8x16 <t> x y)
        // cond:
-       // result: (SARWconst [c&31] x)
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(Op386SARWconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386SARWconst(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh8x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SARWconst [c] (MOVLconst [d]))
+       // match: (Lsh8x32 <t> x y)
        // cond:
-       // result: (MOVLconst [d>>uint64(c)])
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(Op386MOVLconst)
-               v.AuxInt = d >> uint64(c)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386SBBL(v *Value, config *Config) bool {
+func rewriteValue386_OpLsh8x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SBBL x (MOVLconst [c]) f)
-       // cond:
-       // result: (SBBLconst [c] x f)
+       // match: (Lsh8x64 x (Const64 [c]))
+       // cond: uint64(c) < 8
+       // result: (SHLLconst x [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               f := v.Args[2]
-               v.reset(Op386SBBLconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               v.AddArg(f)
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386SBBLcarrymask(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SBBLcarrymask (FlagEQ))
-       // cond:
-       // result: (MOVLconst [0])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagEQ {
+               if !(uint64(c) < 8) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               v.reset(Op386SHLLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (SBBLcarrymask (FlagLT_ULT))
-       // cond:
-       // result: (MOVLconst [-1])
+       // match: (Lsh8x64 _ (Const64 [c]))
+       // cond: uint64(c) >= 8
+       // result: (Const8 [0])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_ULT {
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = -1
-               return true
-       }
-       // match: (SBBLcarrymask (FlagLT_UGT))
-       // cond:
-       // result: (MOVLconst [0])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_UGT {
+               c := v_1.AuxInt
+               if !(uint64(c) >= 8) {
                        break
                }
-               v.reset(Op386MOVLconst)
+               v.reset(OpConst8)
                v.AuxInt = 0
                return true
        }
-       // match: (SBBLcarrymask (FlagGT_ULT))
+       return false
+}
+func rewriteValue386_OpLsh8x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x8  <t> x y)
        // cond:
-       // result: (MOVLconst [-1])
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = -1
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SBBLcarrymask (FlagGT_UGT))
+}
+func rewriteValue386_OpMod16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16  x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (MODW  x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_UGT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386MODW)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386SETA(v *Value, config *Config) bool {
+func rewriteValue386_OpMod16u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETA (InvertFlags x))
+       // match: (Mod16u x y)
        // cond:
-       // result: (SETB x)
+       // result: (MODWU x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(Op386SETB)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386MODWU)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETA (FlagEQ))
-       // cond:
-       // result: (MOVLconst [0])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagEQ {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
-               return true
-       }
-       // match: (SETA (FlagLT_ULT))
+}
+func rewriteValue386_OpMod32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32  x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (MODL  x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386MODL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETA (FlagLT_UGT))
+}
+func rewriteValue386_OpMod32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32u x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (MODLU x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_UGT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386MODLU)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETA (FlagGT_ULT))
+}
+func rewriteValue386_OpMod8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8   x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (MODW  (SignExt8to16 x) (SignExt8to16 y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386MODW)
+               v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETA (FlagGT_UGT))
+}
+func rewriteValue386_OpMod8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8u  x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_UGT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386MODWU)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
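
Mod8 and Mod8u above widen both operands to 16 bits first, with the extension matching the signedness, and reuse the 16-bit MODW/MODWU ops; presumably this avoids a dedicated 8-bit divide lowering. The arithmetic is unchanged by the widening:

    // Go-level semantics of the two rules (y must be nonzero).
    func mod8(x, y int8) int8 { // MODW  (SignExt8to16 x) (SignExt8to16 y)
        return int8(int16(x) % int16(y))
    }

    func mod8u(x, y uint8) uint8 { // MODWU (ZeroExt8to16 x) (ZeroExt8to16 y)
        return uint8(uint16(x) % uint16(y))
    }
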
-func rewriteValue386_Op386SETAE(v *Value, config *Config) bool {
+func rewriteValue386_OpMove(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETAE (InvertFlags x))
-       // cond:
-       // result: (SETBE x)
+       // match: (Move [s] _ _ mem)
+       // cond: SizeAndAlign(s).Size() == 0
+       // result: mem
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386InvertFlags {
+               s := v.AuxInt
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 0) {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(Op386SETBE)
-               v.AddArg(x)
+               v.reset(OpCopy)
+               v.Type = mem.Type
+               v.AddArg(mem)
                return true
        }
-       // match: (SETAE (FlagEQ))
-       // cond:
-       // result: (MOVLconst [1])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 1
+       // result: (MOVBstore dst (MOVBload src mem) mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagEQ {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 1) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               v.reset(Op386MOVBstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
-       // match: (SETAE (FlagLT_ULT))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2
+       // result: (MOVWstore dst (MOVWload src mem) mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_ULT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 2) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               v.reset(Op386MOVWstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
-       // match: (SETAE (FlagLT_UGT))
-       // cond:
-       // result: (MOVLconst [1])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4
+       // result: (MOVLstore dst (MOVLload src mem) mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_UGT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               v.reset(Op386MOVLstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
-       // match: (SETAE (FlagGT_ULT))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 3
+       // result: (MOVBstore [2] dst (MOVBload [2] src mem)            (MOVWstore dst (MOVWload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_ULT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 3) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               v.reset(Op386MOVBstore)
+               v.AuxInt = 2
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
+               v0.AuxInt = 2
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386MOVWstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETAE (FlagGT_UGT))
-       // cond:
-       // result: (MOVLconst [1])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 5
+       // result: (MOVBstore [4] dst (MOVBload [4] src mem)            (MOVLstore dst (MOVLload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_UGT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 5) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               v.reset(Op386MOVBstore)
+               v.AuxInt = 4
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
+               v0.AuxInt = 4
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386SETB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SETB (InvertFlags x))
-       // cond:
-       // result: (SETA x)
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 6
+       // result: (MOVWstore [4] dst (MOVWload [4] src mem)            (MOVLstore dst (MOVLload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386InvertFlags {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 6) {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(Op386SETA)
-               v.AddArg(x)
+               v.reset(Op386MOVWstore)
+               v.AuxInt = 4
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
+               v0.AuxInt = 4
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETB (FlagEQ))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 7
+       // result: (MOVLstore [3] dst (MOVLload [3] src mem)            (MOVLstore dst (MOVLload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagEQ {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 7) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               v.reset(Op386MOVLstore)
+               v.AuxInt = 3
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+               v0.AuxInt = 3
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETB (FlagLT_ULT))
-       // cond:
-       // result: (MOVLconst [1])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 8
+       // result: (MOVLstore [4] dst (MOVLload [4] src mem)            (MOVLstore dst (MOVLload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_ULT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 8) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               v.reset(Op386MOVLstore)
+               v.AuxInt = 4
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+               v0.AuxInt = 4
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETB (FlagLT_UGT))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0
+       // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4]              (ADDLconst <dst.Type> dst [SizeAndAlign(s).Size()%4])           (ADDLconst <src.Type> src [SizeAndAlign(s).Size()%4])           (MOVLstore dst (MOVLload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_UGT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               v.reset(OpMove)
+               v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
+               v0 := b.NewValue0(v.Line, Op386ADDLconst, dst.Type)
+               v0.AuxInt = SizeAndAlign(s).Size() % 4
+               v0.AddArg(dst)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386ADDLconst, src.Type)
+               v1.AuxInt = SizeAndAlign(s).Size() % 4
+               v1.AddArg(src)
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
+               v2.AddArg(dst)
+               v3 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
+               v3.AddArg(src)
+               v3.AddArg(mem)
+               v2.AddArg(v3)
+               v2.AddArg(mem)
+               v.AddArg(v2)
                return true
        }
-       // match: (SETB (FlagGT_ULT))
-       // cond:
-       // result: (MOVLconst [1])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0         && !config.noDuffDevice
+       // result: (DUFFCOPY [10*(128-SizeAndAlign(s).Size()/4)] dst src mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_ULT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               v.reset(Op386DUFFCOPY)
+               v.AuxInt = 10 * (128 - SizeAndAlign(s).Size()/4)
+               v.AddArg(dst)
+               v.AddArg(src)
+               v.AddArg(mem)
                return true
        }
-       // match: (SETB (FlagGT_UGT))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Move [s] dst src mem)
+       // cond: (SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0
+       // result: (REPMOVSL dst src (MOVLconst [SizeAndAlign(s).Size()/4]) mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_UGT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !((SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               v.reset(Op386REPMOVSL)
+               v.AddArg(dst)
+               v.AddArg(src)
+               v0 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
+               v0.AuxInt = SizeAndAlign(s).Size() / 4
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
        return false
 }
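
The Move lowering above is a size ladder: exact small sizes get explicit load/store pairs, a misaligned tail is peeled off by the ADDLconst rule, and 4-byte-aligned bulk copies go to Duff's device or REPMOVSL. The DUFFCOPY AuxInt is an entry offset into the duffcopy body; reading the formula back, it appears to assume 128 copy units of 4 bytes each at 10 bytes of code per unit:

    // Entry offset used by (DUFFCOPY [10*(128-SizeAndAlign(s).Size()/4)] ...).
    // size must be a multiple of 4 and at most 4*128 bytes.
    func duffCopyEntry(size int64) int64 {
        return 10 * (128 - size/4) // skip the copy units this Move doesn't need
    }
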
-func rewriteValue386_Op386SETBE(v *Value, config *Config) bool {
+func rewriteValue386_OpMul16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETBE (InvertFlags x))
+       // match: (Mul16  x y)
        // cond:
-       // result: (SETAE x)
+       // result: (MULL  x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(Op386SETAE)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386MULL)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETBE (FlagEQ))
+}
+func rewriteValue386_OpMul32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32  x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (MULL  x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagEQ {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386MULL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETBE (FlagLT_ULT))
+}
+func rewriteValue386_OpMul32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32F x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (MULSS x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386MULSS)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETBE (FlagLT_UGT))
+}
+func rewriteValue386_OpMul32uhilo(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32uhilo x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (MULLQU x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_UGT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386MULLQU)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETBE (FlagGT_ULT))
+}
+func rewriteValue386_OpMul64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul64F x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (MULSD x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386MULSD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETBE (FlagGT_UGT))
+}
+func rewriteValue386_OpMul8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul8   x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (MULL  x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_UGT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386MULL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386SETEQ(v *Value, config *Config) bool {
+func rewriteValue386_OpNeg16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETEQ (InvertFlags x))
+       // match: (Neg16  x)
        // cond:
-       // result: (SETEQ x)
+       // result: (NEGL x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(Op386SETEQ)
+               x := v.Args[0]
+               v.reset(Op386NEGL)
                v.AddArg(x)
                return true
        }
-       // match: (SETEQ (FlagEQ))
+}
+func rewriteValue386_OpNeg32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32  x)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (NEGL x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagEQ {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               v.reset(Op386NEGL)
+               v.AddArg(x)
                return true
        }
-       // match: (SETEQ (FlagLT_ULT))
+}
+func rewriteValue386_OpNeg32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32F x)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               v.reset(Op386PXOR)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386MOVSSconst, config.Frontend().TypeFloat32())
+               v0.AuxInt = f2i(math.Copysign(0, -1))
+               v.AddArg(v0)
                return true
        }
-       // match: (SETEQ (FlagLT_UGT))
+}
+func rewriteValue386_OpNeg64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg64F x)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_UGT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               v.reset(Op386PXOR)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386MOVSDconst, config.Frontend().TypeFloat64())
+               v0.AuxInt = f2i(math.Copysign(0, -1))
+               v.AddArg(v0)
                return true
        }
-       // match: (SETEQ (FlagGT_ULT))
+}
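
Neg32F and Neg64F above negate by XORing in the sign bit: f2i(math.Copysign(0, -1)) is the bit pattern of -0.0, so the PXOR flips the sign without touching exponent or mantissa. The same computation in plain Go:

    package sketch

    import "math"

    func neg64F(x float64) float64 {
        signBit := math.Float64bits(math.Copysign(0, -1)) // 0x8000000000000000
        return math.Float64frombits(math.Float64bits(x) ^ signBit)
    }
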
+func rewriteValue386_OpNeg8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg8   x)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (NEGL x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               v.reset(Op386NEGL)
+               v.AddArg(x)
                return true
        }
-       // match: (SETEQ (FlagGT_UGT))
+}
+func rewriteValue386_OpNeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq16  x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (SETNE (CMPW x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_UGT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETNE)
+               v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386SETG(v *Value, config *Config) bool {
+func rewriteValue386_OpNeq32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETG (InvertFlags x))
+       // match: (Neq32  x y)
        // cond:
-       // result: (SETL x)
+       // result: (SETNE (CMPL x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(Op386SETL)
-               v.AddArg(x)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETNE)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETG (FlagEQ))
+}
+func rewriteValue386_OpNeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq32F x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (SETNEF (UCOMISS x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagEQ {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETNEF)
+               v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETG (FlagLT_ULT))
+}
+func rewriteValue386_OpNeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq64F x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (SETNEF (UCOMISD x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETNEF)
+               v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETG (FlagLT_UGT))
+}
+func rewriteValue386_OpNeq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq8   x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (SETNE (CMPB x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_UGT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETNE)
+               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETG (FlagGT_ULT))
+}
+func rewriteValue386_OpNeqB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NeqB   x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (SETNE (CMPB x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETNE)
+               v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETG (FlagGT_UGT))
+}
+func rewriteValue386_OpNeqPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NeqPtr x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (SETNE (CMPL x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_UGT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SETNE)
+               v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
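
The integer Neq rules above lower to SETNE over the width-appropriate CMP, while the float versions use SETNEF over UCOMISS/UCOMISD: the unordered (NaN) compare result must count as "not equal", which is exactly Go's float inequality:

    // Why SETNEF rather than SETNE: NaN operands compare unordered, and
    // Go requires x != y to be true when either operand is NaN.
    func neq64F(x, y float64) bool {
        return x != y
    }
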
-func rewriteValue386_Op386SETGE(v *Value, config *Config) bool {
+func rewriteValue386_OpNilCheck(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETGE (InvertFlags x))
+       // match: (NilCheck ptr mem)
        // cond:
-       // result: (SETLE x)
+       // result: (LoweredNilCheck ptr mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(Op386SETLE)
-               v.AddArg(x)
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               v.reset(Op386LoweredNilCheck)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (SETGE (FlagEQ))
+}
+func rewriteValue386_OpNot(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Not x)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (XORLconst [1] x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagEQ {
-                       break
-               }
-               v.reset(Op386MOVLconst)
+               x := v.Args[0]
+               v.reset(Op386XORLconst)
                v.AuxInt = 1
+               v.AddArg(x)
                return true
        }
-       // match: (SETGE (FlagLT_ULT))
+}
+func rewriteValue386_OpOffPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OffPtr [off] ptr)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (ADDLconst [off] ptr)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               off := v.AuxInt
+               ptr := v.Args[0]
+               v.reset(Op386ADDLconst)
+               v.AuxInt = off
+               v.AddArg(ptr)
                return true
        }
-       // match: (SETGE (FlagLT_UGT))
+}
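The OffPtr rule above lowers a generic pointer-offset op to a single add-immediate; in scalar terms (an illustration, not compiler code):

func offPtr(ptr, off uintptr) uintptr {
	return ptr + off // (ADDLconst [off] ptr)
}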
+func rewriteValue386_OpOr16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or16 x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (ORL x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_UGT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ORL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETGE (FlagGT_ULT))
+}
+func rewriteValue386_OpOr32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or32 x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (ORL x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ORL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETGE (FlagGT_UGT))
+}
+func rewriteValue386_OpOr8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or8  x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (ORL x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_UGT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ORL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386SETL(v *Value, config *Config) bool {
+func rewriteValue386_OpOrB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETL (InvertFlags x))
+       // match: (OrB x y)
        // cond:
-       // result: (SETG x)
+       // result: (ORL x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(Op386SETG)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ORL)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETL (FlagEQ))
+}
+func rewriteValue386_OpRsh16Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux16 <t> x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagEQ {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHRW, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+               v2.AuxInt = 16
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETL (FlagLT_ULT))
+}
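The Rsh16Ux16 pattern above is the branch-free idiom used for all the variable-count unsigned shifts in this file: the CMPWconst/SBBLcarrymask pair materializes an all-ones mask exactly when the count is in range, and the ANDL zeroes the result otherwise. A plain-Go reading, assuming SBBLcarrymask yields ^0 when the compare set the carry flag (y < 16) and 0 when it did not:

func rsh16Ux16(x, y uint16) uint16 {
	raw := uint32(x) >> (y & 31) // SHRW truncates its count like the hardware
	var mask uint32              // SBBLcarrymask (CMPWconst y [16])
	if y < 16 {
		mask = ^uint32(0)
	}
	return uint16(raw & mask) // ANDL: zero whenever the count is out of range
}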
+func rewriteValue386_OpRsh16Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux32 <t> x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHRW, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+               v2.AuxInt = 16
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETL (FlagLT_UGT))
-       // cond:
-       // result: (MOVLconst [1])
+}
+func rewriteValue386_OpRsh16Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux64 x (Const64 [c]))
+       // cond: uint64(c) < 16
+       // result: (SHRWconst x [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_UGT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               c := v_1.AuxInt
+               if !(uint64(c) < 16) {
+                       break
+               }
+               v.reset(Op386SHRWconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (SETL (FlagGT_ULT))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Rsh16Ux64 _ (Const64 [c]))
+       // cond: uint64(c) >= 16
+       // result: (Const16 [0])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_ULT {
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               v.reset(Op386MOVLconst)
+               c := v_1.AuxInt
+               if !(uint64(c) >= 16) {
+                       break
+               }
+               v.reset(OpConst16)
                v.AuxInt = 0
                return true
        }
-       // match: (SETL (FlagGT_UGT))
+       return false
+}
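When the count is a known constant, the masking above is unnecessary, so the two Rsh16Ux64 rules split on the count instead: an in-range count becomes a single shift-by-immediate, and an out-of-range count folds the whole expression to a constant zero. The same logic in plain Go:

func rsh16Ux64(x uint16, c uint64) uint16 {
	if c < 16 {
		return x >> c // (SHRWconst x [c])
	}
	return 0 // (Const16 [0])
}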
+func rewriteValue386_OpRsh16Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux8  <t> x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_UGT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHRW, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+               v2.AuxInt = 16
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386SETLE(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh16x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETLE (InvertFlags x))
+       // match: (Rsh16x16 <t> x y)
        // cond:
-       // result: (SETGE x)
+       // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(Op386SETGE)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SARW)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+               v3.AuxInt = 16
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETLE (FlagEQ))
+}
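For signed shifts, an out-of-range count must fill with sign bits rather than zeros, so the Rsh16x16 pattern above saturates the count instead of masking the result: NOTL flips the SBBLcarrymask, and the ORL turns an in-range y into itself and an out-of-range y into all ones, which the truncating SARW then treats as a shift by 31. A sketch under the same SBBLcarrymask assumption as before:

func rsh16x16(x int16, y uint16) int16 {
	count := uint32(y)
	if y >= 16 { // ORL y (NOTL (SBBLcarrymask (CMPWconst y [16])))
		count = ^uint32(0) // saturate the count
	}
	return x >> (count & 31) // SARW truncates its count; 31 >= 15 smears the sign bit
}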
+func rewriteValue386_OpRsh16x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x32 <t> x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagEQ {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SARW)
+               v.Type = t
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+               v3.AuxInt = 16
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETLE (FlagLT_ULT))
-       // cond:
-       // result: (MOVLconst [1])
+}
+func rewriteValue386_OpRsh16x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x64 x (Const64 [c]))
+       // cond: uint64(c) < 16
+       // result: (SARWconst x [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_ULT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
-               return true
-       }
-       // match: (SETLE (FlagLT_UGT))
-       // cond:
-       // result: (MOVLconst [1])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_UGT {
+               c := v_1.AuxInt
+               if !(uint64(c) < 16) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               v.reset(Op386SARWconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (SETLE (FlagGT_ULT))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Rsh16x64 x (Const64 [c]))
+       // cond: uint64(c) >= 16
+       // result: (SARWconst x [15])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_ULT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
-               return true
-       }
-       // match: (SETLE (FlagGT_UGT))
-       // cond:
-       // result: (MOVLconst [0])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_UGT {
+               c := v_1.AuxInt
+               if !(uint64(c) >= 16) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               v.reset(Op386SARWconst)
+               v.AuxInt = 15
+               v.AddArg(x)
                return true
        }
        return false
 }
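The constant-count counterpart appears in Rsh16x64 above: a count of 16 or more is clamped to 15 rather than folded to zero, because an arithmetic right shift of a 16-bit value by width-1 already turns every bit into a copy of the sign bit. In plain Go:

func rsh16x64(x int16, c uint64) int16 {
	if c < 16 {
		return x >> c // (SARWconst x [c])
	}
	return x >> 15 // (SARWconst x [15]): 0 or -1, matching the sign
}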
-func rewriteValue386_Op386SETNE(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh16x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETNE (InvertFlags x))
+       // match: (Rsh16x8  <t> x y)
        // cond:
-       // result: (SETNE x)
+       // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(Op386SETNE)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SARW)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+               v3.AuxInt = 16
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETNE (FlagEQ))
+}
+func rewriteValue386_OpRsh32Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux16 <t> x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagEQ {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHRL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETNE (FlagLT_ULT))
+}
+func rewriteValue386_OpRsh32Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux32 <t> x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_ULT {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHRL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETNE (FlagLT_UGT))
-       // cond:
-       // result: (MOVLconst [1])
+}
+func rewriteValue386_OpRsh32Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux64 x (Const64 [c]))
+       // cond: uint64(c) < 32
+       // result: (SHRLconst x [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagLT_UGT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
-               return true
-       }
-       // match: (SETNE (FlagGT_ULT))
-       // cond:
-       // result: (MOVLconst [1])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_ULT {
+               c := v_1.AuxInt
+               if !(uint64(c) < 32) {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               v.reset(Op386SHRLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (SETNE (FlagGT_UGT))
-       // cond:
-       // result: (MOVLconst [1])
+       // match: (Rsh32Ux64 _ (Const64 [c]))
+       // cond: uint64(c) >= 32
+       // result: (Const32 [0])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386FlagGT_UGT {
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 1
+               c := v_1.AuxInt
+               if !(uint64(c) >= 32) {
+                       break
+               }
+               v.reset(OpConst32)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValue386_Op386SHLL(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh32Ux8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SHLL x (MOVLconst [c]))
+       // match: (Rsh32Ux8  <t> x y)
        // cond:
-       // result: (SHLLconst [c&31] x)
+       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(Op386SHLLconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHRL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SHLL x (MOVLconst [c]))
+}
+func rewriteValue386_OpRsh32x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32x16 <t> x y)
        // cond:
-       // result: (SHLLconst [c&31] x)
+       // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(Op386SHLLconst)
-               v.AuxInt = c & 31
+               y := v.Args[1]
+               v.reset(Op386SARL)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+               v3.AuxInt = 32
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SHLL x (ANDLconst [31] y))
+}
+func rewriteValue386_OpRsh32x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32x32 <t> x y)
        // cond:
-       // result: (SHLL x y)
+       // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ANDLconst {
-                       break
-               }
-               if v_1.AuxInt != 31 {
-                       break
-               }
-               y := v_1.Args[0]
-               v.reset(Op386SHLL)
+               y := v.Args[1]
+               v.reset(Op386SARL)
+               v.Type = t
                v.AddArg(x)
-               v.AddArg(y)
+               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+               v3.AuxInt = 32
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
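Among the lines removed above is the old rule (SHLL x (ANDLconst [31] y)) -> (SHLL x y), which strips a redundant mask from a shift count: a 32-bit x86 shift uses only the low five bits of its count register, so pre-masking with 31 cannot change the result. Illustratively:

func shll(x, y uint32) uint32 {
	return x << (y & 31) // the machine SHLL behaves the same with or without the &31
}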
-func rewriteValue386_Op386SHRB(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh32x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SHRB x (MOVLconst [c]))
-       // cond:
-       // result: (SHRBconst [c&31] x)
+       // match: (Rsh32x64 x (Const64 [c]))
+       // cond: uint64(c) < 32
+       // result: (SARLconst x [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               v.reset(Op386SHRBconst)
-               v.AuxInt = c & 31
+               if !(uint64(c) < 32) {
+                       break
+               }
+               v.reset(Op386SARLconst)
+               v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (SHRB x (MOVLconst [c]))
-       // cond:
-       // result: (SHRBconst [c&31] x)
+       // match: (Rsh32x64 x (Const64 [c]))
+       // cond: uint64(c) >= 32
+       // result: (SARLconst x [31])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               v.reset(Op386SHRBconst)
-               v.AuxInt = c & 31
+               if !(uint64(c) >= 32) {
+                       break
+               }
+               v.reset(Op386SARLconst)
+               v.AuxInt = 31
                v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValue386_Op386SHRL(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh32x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SHRL x (MOVLconst [c]))
+       // match: (Rsh32x8  <t> x y)
        // cond:
-       // result: (SHRLconst [c&31] x)
+       // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(Op386SHRLconst)
-               v.AuxInt = c & 31
+               y := v.Args[1]
+               v.reset(Op386SARL)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+               v3.AuxInt = 32
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SHRL x (MOVLconst [c]))
+}
+func rewriteValue386_OpRsh8Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux16 <t> x y)
        // cond:
-       // result: (SHRLconst [c&31] x)
+       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(Op386SHRLconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHRB, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+               v2.AuxInt = 8
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SHRL x (ANDLconst [31] y))
+}
+func rewriteValue386_OpRsh8Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux32 <t> x y)
        // cond:
-       // result: (SHRL x y)
+       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386ANDLconst {
-                       break
-               }
-               if v_1.AuxInt != 31 {
-                       break
-               }
-               y := v_1.Args[0]
-               v.reset(Op386SHRL)
-               v.AddArg(x)
-               v.AddArg(y)
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHRB, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+               v2.AuxInt = 8
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386SHRW(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh8Ux64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SHRW x (MOVLconst [c]))
-       // cond:
-       // result: (SHRWconst [c&31] x)
+       // match: (Rsh8Ux64 x (Const64 [c]))
+       // cond: uint64(c) < 8
+       // result: (SHRBconst x [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               v.reset(Op386SHRWconst)
-               v.AuxInt = c & 31
+               if !(uint64(c) < 8) {
+                       break
+               }
+               v.reset(Op386SHRBconst)
+               v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (SHRW x (MOVLconst [c]))
-       // cond:
-       // result: (SHRWconst [c&31] x)
+       // match: (Rsh8Ux64 _ (Const64 [c]))
+       // cond: uint64(c) >= 8
+       // result: (Const8 [0])
        for {
-               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               v.reset(Op386SHRWconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               if !(uint64(c) >= 8) {
+                       break
+               }
+               v.reset(OpConst8)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValue386_Op386SUBL(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh8Ux8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBL x (MOVLconst [c]))
+       // match: (Rsh8Ux8  <t> x y)
        // cond:
-       // result: (SUBLconst x [c])
+       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(Op386SUBLconst)
-               v.AddArg(x)
-               v.AuxInt = c
+               y := v.Args[1]
+               v.reset(Op386ANDL)
+               v0 := b.NewValue0(v.Line, Op386SHRB, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+               v2.AuxInt = 8
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SUBL (MOVLconst [c]) x)
+}
+func rewriteValue386_OpRsh8x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x16 <t> x y)
        // cond:
-       // result: (NEGL (SUBLconst <v.Type> x [c]))
+       // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(Op386NEGL)
-               v0 := b.NewValue0(v.Line, Op386SUBLconst, v.Type)
-               v0.AddArg(x)
-               v0.AuxInt = c
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(Op386SARB)
+               v.Type = t
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
+               v3.AuxInt = 8
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
-       // match: (SUBL x x)
+}
+func rewriteValue386_OpRsh8x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x32 <t> x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
+               y := v.Args[1]
+               v.reset(Op386SARB)
+               v.Type = t
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
+               v3.AuxInt = 8
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValue386_Op386SUBLcarry(v *Value, config *Config) bool {
+func rewriteValue386_OpRsh8x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBLcarry x (MOVLconst [c]))
-       // cond:
-       // result: (SUBLconstcarry [c] x)
+       // match: (Rsh8x64 x (Const64 [c]))
+       // cond: uint64(c) < 8
+       // result: (SARBconst x [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               v.reset(Op386SUBLconstcarry)
+               if !(uint64(c) < 8) {
+                       break
+               }
+               v.reset(Op386SARBconst)
                v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       return false
-}
-func rewriteValue386_Op386SUBLconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SUBLconst [c] x)
-       // cond: int32(c) == 0
-       // result: x
+       // match: (Rsh8x64 x (Const64 [c]))
+       // cond: uint64(c) >= 8
+       // result: (SARBconst x [7])
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               if !(int32(c) == 0) {
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               c := v_1.AuxInt
+               if !(uint64(c) >= 8) {
+                       break
+               }
+               v.reset(Op386SARBconst)
+               v.AuxInt = 7
                v.AddArg(x)
                return true
        }
-       // match: (SUBLconst [c] x)
+       return false
+}
+func rewriteValue386_OpRsh8x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x8  <t> x y)
        // cond:
-       // result: (ADDLconst [int64(int32(-c))] x)
+       // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
        for {
-               c := v.AuxInt
+               t := v.Type
                x := v.Args[0]
-               v.reset(Op386ADDLconst)
-               v.AuxInt = int64(int32(-c))
+               y := v.Args[1]
+               v.reset(Op386SARB)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
+               v3.AuxInt = 8
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
 }
@@ -12314,8 +12410,8 @@ func rewriteValue386_OpSignmask(v *Value, config *Config) bool {
        for {
                x := v.Args[0]
                v.reset(Op386SARLconst)
-               v.AddArg(x)
                v.AuxInt = 31
+               v.AddArg(x)
                return true
        }
 }
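The Signmask hunk above merely swaps the AuxInt and AddArg assignments, which write independent fields of the value, so the constructed node is unchanged. The op itself fills a word with copies of the sign bit; in plain Go:

func signmask(x int32) int32 {
	return x >> 31 // (SARLconst [31]): 0 for x >= 0, -1 for x < 0
}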
@@ -12604,102 +12700,6 @@ func rewriteValue386_OpTrunc32to8(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValue386_Op386XORL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (XORL x (MOVLconst [c]))
-       // cond:
-       // result: (XORLconst [c] x)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(Op386XORLconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORL (MOVLconst [c]) x)
-       // cond:
-       // result: (XORLconst [c] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(Op386XORLconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORL x x)
-       // cond:
-       // result: (MOVLconst [0])
-       for {
-               x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(Op386MOVLconst)
-               v.AuxInt = 0
-               return true
-       }
-       return false
-}
-func rewriteValue386_Op386XORLconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (XORLconst [c] (XORLconst [d] x))
-       // cond:
-       // result: (XORLconst [c ^ d] x)
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != Op386XORLconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(Op386XORLconst)
-               v.AuxInt = c ^ d
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORLconst [c] x)
-       // cond: int32(c)==0
-       // result: x
-       for {
-               c := v.AuxInt
-               x := v.Args[0]
-               if !(int32(c) == 0) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORLconst [c] (MOVLconst [d]))
-       // cond:
-       // result: (MOVLconst [c^d])
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != Op386MOVLconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(Op386MOVLconst)
-               v.AuxInt = c ^ d
-               return true
-       }
-       return false
-}
 func rewriteValue386_OpXor16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
@@ -12903,8 +12903,8 @@ func rewriteValue386_OpZero(v *Value, config *Config) bool {
                v.reset(OpZero)
                v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
                v0 := b.NewValue0(v.Line, Op386ADDLconst, config.fe.TypeUInt32())
-               v0.AddArg(destptr)
                v0.AuxInt = SizeAndAlign(s).Size() % 4
+               v0.AddArg(destptr)
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
                v1.AuxInt = 0
index 154e1e923174e1a759a009725df1f89cc92c0153..19f01a0148a2d6b0980aee1abc557e030510c19a 100644 (file)
@@ -24,38 +24,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
                return rewriteValueAMD64_OpAMD64ANDQ(v, config)
        case OpAMD64ANDQconst:
                return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
-       case OpAdd16:
-               return rewriteValueAMD64_OpAdd16(v, config)
-       case OpAdd32:
-               return rewriteValueAMD64_OpAdd32(v, config)
-       case OpAdd32F:
-               return rewriteValueAMD64_OpAdd32F(v, config)
-       case OpAdd64:
-               return rewriteValueAMD64_OpAdd64(v, config)
-       case OpAdd64F:
-               return rewriteValueAMD64_OpAdd64F(v, config)
-       case OpAdd8:
-               return rewriteValueAMD64_OpAdd8(v, config)
-       case OpAddPtr:
-               return rewriteValueAMD64_OpAddPtr(v, config)
-       case OpAddr:
-               return rewriteValueAMD64_OpAddr(v, config)
-       case OpAnd16:
-               return rewriteValueAMD64_OpAnd16(v, config)
-       case OpAnd32:
-               return rewriteValueAMD64_OpAnd32(v, config)
-       case OpAnd64:
-               return rewriteValueAMD64_OpAnd64(v, config)
-       case OpAnd8:
-               return rewriteValueAMD64_OpAnd8(v, config)
-       case OpAndB:
-               return rewriteValueAMD64_OpAndB(v, config)
-       case OpAvg64u:
-               return rewriteValueAMD64_OpAvg64u(v, config)
-       case OpBswap32:
-               return rewriteValueAMD64_OpBswap32(v, config)
-       case OpBswap64:
-               return rewriteValueAMD64_OpBswap64(v, config)
        case OpAMD64CMOVLEQconst:
                return rewriteValueAMD64_OpAMD64CMOVLEQconst(v, config)
        case OpAMD64CMOVQEQconst:
@@ -78,168 +46,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
                return rewriteValueAMD64_OpAMD64CMPW(v, config)
        case OpAMD64CMPWconst:
                return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
-       case OpClosureCall:
-               return rewriteValueAMD64_OpClosureCall(v, config)
-       case OpCom16:
-               return rewriteValueAMD64_OpCom16(v, config)
-       case OpCom32:
-               return rewriteValueAMD64_OpCom32(v, config)
-       case OpCom64:
-               return rewriteValueAMD64_OpCom64(v, config)
-       case OpCom8:
-               return rewriteValueAMD64_OpCom8(v, config)
-       case OpConst16:
-               return rewriteValueAMD64_OpConst16(v, config)
-       case OpConst32:
-               return rewriteValueAMD64_OpConst32(v, config)
-       case OpConst32F:
-               return rewriteValueAMD64_OpConst32F(v, config)
-       case OpConst64:
-               return rewriteValueAMD64_OpConst64(v, config)
-       case OpConst64F:
-               return rewriteValueAMD64_OpConst64F(v, config)
-       case OpConst8:
-               return rewriteValueAMD64_OpConst8(v, config)
-       case OpConstBool:
-               return rewriteValueAMD64_OpConstBool(v, config)
-       case OpConstNil:
-               return rewriteValueAMD64_OpConstNil(v, config)
-       case OpConvert:
-               return rewriteValueAMD64_OpConvert(v, config)
-       case OpCtz16:
-               return rewriteValueAMD64_OpCtz16(v, config)
-       case OpCtz32:
-               return rewriteValueAMD64_OpCtz32(v, config)
-       case OpCtz64:
-               return rewriteValueAMD64_OpCtz64(v, config)
-       case OpCvt32Fto32:
-               return rewriteValueAMD64_OpCvt32Fto32(v, config)
-       case OpCvt32Fto64:
-               return rewriteValueAMD64_OpCvt32Fto64(v, config)
-       case OpCvt32Fto64F:
-               return rewriteValueAMD64_OpCvt32Fto64F(v, config)
-       case OpCvt32to32F:
-               return rewriteValueAMD64_OpCvt32to32F(v, config)
-       case OpCvt32to64F:
-               return rewriteValueAMD64_OpCvt32to64F(v, config)
-       case OpCvt64Fto32:
-               return rewriteValueAMD64_OpCvt64Fto32(v, config)
-       case OpCvt64Fto32F:
-               return rewriteValueAMD64_OpCvt64Fto32F(v, config)
-       case OpCvt64Fto64:
-               return rewriteValueAMD64_OpCvt64Fto64(v, config)
-       case OpCvt64to32F:
-               return rewriteValueAMD64_OpCvt64to32F(v, config)
-       case OpCvt64to64F:
-               return rewriteValueAMD64_OpCvt64to64F(v, config)
-       case OpDeferCall:
-               return rewriteValueAMD64_OpDeferCall(v, config)
-       case OpDiv16:
-               return rewriteValueAMD64_OpDiv16(v, config)
-       case OpDiv16u:
-               return rewriteValueAMD64_OpDiv16u(v, config)
-       case OpDiv32:
-               return rewriteValueAMD64_OpDiv32(v, config)
-       case OpDiv32F:
-               return rewriteValueAMD64_OpDiv32F(v, config)
-       case OpDiv32u:
-               return rewriteValueAMD64_OpDiv32u(v, config)
-       case OpDiv64:
-               return rewriteValueAMD64_OpDiv64(v, config)
-       case OpDiv64F:
-               return rewriteValueAMD64_OpDiv64F(v, config)
-       case OpDiv64u:
-               return rewriteValueAMD64_OpDiv64u(v, config)
-       case OpDiv8:
-               return rewriteValueAMD64_OpDiv8(v, config)
-       case OpDiv8u:
-               return rewriteValueAMD64_OpDiv8u(v, config)
-       case OpEq16:
-               return rewriteValueAMD64_OpEq16(v, config)
-       case OpEq32:
-               return rewriteValueAMD64_OpEq32(v, config)
-       case OpEq32F:
-               return rewriteValueAMD64_OpEq32F(v, config)
-       case OpEq64:
-               return rewriteValueAMD64_OpEq64(v, config)
-       case OpEq64F:
-               return rewriteValueAMD64_OpEq64F(v, config)
-       case OpEq8:
-               return rewriteValueAMD64_OpEq8(v, config)
-       case OpEqB:
-               return rewriteValueAMD64_OpEqB(v, config)
-       case OpEqPtr:
-               return rewriteValueAMD64_OpEqPtr(v, config)
-       case OpGeq16:
-               return rewriteValueAMD64_OpGeq16(v, config)
-       case OpGeq16U:
-               return rewriteValueAMD64_OpGeq16U(v, config)
-       case OpGeq32:
-               return rewriteValueAMD64_OpGeq32(v, config)
-       case OpGeq32F:
-               return rewriteValueAMD64_OpGeq32F(v, config)
-       case OpGeq32U:
-               return rewriteValueAMD64_OpGeq32U(v, config)
-       case OpGeq64:
-               return rewriteValueAMD64_OpGeq64(v, config)
-       case OpGeq64F:
-               return rewriteValueAMD64_OpGeq64F(v, config)
-       case OpGeq64U:
-               return rewriteValueAMD64_OpGeq64U(v, config)
-       case OpGeq8:
-               return rewriteValueAMD64_OpGeq8(v, config)
-       case OpGeq8U:
-               return rewriteValueAMD64_OpGeq8U(v, config)
-       case OpGetClosurePtr:
-               return rewriteValueAMD64_OpGetClosurePtr(v, config)
-       case OpGetG:
-               return rewriteValueAMD64_OpGetG(v, config)
-       case OpGoCall:
-               return rewriteValueAMD64_OpGoCall(v, config)
-       case OpGreater16:
-               return rewriteValueAMD64_OpGreater16(v, config)
-       case OpGreater16U:
-               return rewriteValueAMD64_OpGreater16U(v, config)
-       case OpGreater32:
-               return rewriteValueAMD64_OpGreater32(v, config)
-       case OpGreater32F:
-               return rewriteValueAMD64_OpGreater32F(v, config)
-       case OpGreater32U:
-               return rewriteValueAMD64_OpGreater32U(v, config)
-       case OpGreater64:
-               return rewriteValueAMD64_OpGreater64(v, config)
-       case OpGreater64F:
-               return rewriteValueAMD64_OpGreater64F(v, config)
-       case OpGreater64U:
-               return rewriteValueAMD64_OpGreater64U(v, config)
-       case OpGreater8:
-               return rewriteValueAMD64_OpGreater8(v, config)
-       case OpGreater8U:
-               return rewriteValueAMD64_OpGreater8U(v, config)
-       case OpHmul16:
-               return rewriteValueAMD64_OpHmul16(v, config)
-       case OpHmul16u:
-               return rewriteValueAMD64_OpHmul16u(v, config)
-       case OpHmul32:
-               return rewriteValueAMD64_OpHmul32(v, config)
-       case OpHmul32u:
-               return rewriteValueAMD64_OpHmul32u(v, config)
-       case OpHmul64:
-               return rewriteValueAMD64_OpHmul64(v, config)
-       case OpHmul64u:
-               return rewriteValueAMD64_OpHmul64u(v, config)
-       case OpHmul8:
-               return rewriteValueAMD64_OpHmul8(v, config)
-       case OpHmul8u:
-               return rewriteValueAMD64_OpHmul8u(v, config)
-       case OpInterCall:
-               return rewriteValueAMD64_OpInterCall(v, config)
-       case OpIsInBounds:
-               return rewriteValueAMD64_OpIsInBounds(v, config)
-       case OpIsNonNil:
-               return rewriteValueAMD64_OpIsNonNil(v, config)
-       case OpIsSliceInBounds:
-               return rewriteValueAMD64_OpIsSliceInBounds(v, config)
        case OpAMD64LEAQ:
                return rewriteValueAMD64_OpAMD64LEAQ(v, config)
        case OpAMD64LEAQ1:
@@ -250,88 +56,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
                return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
        case OpAMD64LEAQ8:
                return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
-       case OpLeq16:
-               return rewriteValueAMD64_OpLeq16(v, config)
-       case OpLeq16U:
-               return rewriteValueAMD64_OpLeq16U(v, config)
-       case OpLeq32:
-               return rewriteValueAMD64_OpLeq32(v, config)
-       case OpLeq32F:
-               return rewriteValueAMD64_OpLeq32F(v, config)
-       case OpLeq32U:
-               return rewriteValueAMD64_OpLeq32U(v, config)
-       case OpLeq64:
-               return rewriteValueAMD64_OpLeq64(v, config)
-       case OpLeq64F:
-               return rewriteValueAMD64_OpLeq64F(v, config)
-       case OpLeq64U:
-               return rewriteValueAMD64_OpLeq64U(v, config)
-       case OpLeq8:
-               return rewriteValueAMD64_OpLeq8(v, config)
-       case OpLeq8U:
-               return rewriteValueAMD64_OpLeq8U(v, config)
-       case OpLess16:
-               return rewriteValueAMD64_OpLess16(v, config)
-       case OpLess16U:
-               return rewriteValueAMD64_OpLess16U(v, config)
-       case OpLess32:
-               return rewriteValueAMD64_OpLess32(v, config)
-       case OpLess32F:
-               return rewriteValueAMD64_OpLess32F(v, config)
-       case OpLess32U:
-               return rewriteValueAMD64_OpLess32U(v, config)
-       case OpLess64:
-               return rewriteValueAMD64_OpLess64(v, config)
-       case OpLess64F:
-               return rewriteValueAMD64_OpLess64F(v, config)
-       case OpLess64U:
-               return rewriteValueAMD64_OpLess64U(v, config)
-       case OpLess8:
-               return rewriteValueAMD64_OpLess8(v, config)
-       case OpLess8U:
-               return rewriteValueAMD64_OpLess8U(v, config)
-       case OpLoad:
-               return rewriteValueAMD64_OpLoad(v, config)
-       case OpLrot16:
-               return rewriteValueAMD64_OpLrot16(v, config)
-       case OpLrot32:
-               return rewriteValueAMD64_OpLrot32(v, config)
-       case OpLrot64:
-               return rewriteValueAMD64_OpLrot64(v, config)
-       case OpLrot8:
-               return rewriteValueAMD64_OpLrot8(v, config)
-       case OpLsh16x16:
-               return rewriteValueAMD64_OpLsh16x16(v, config)
-       case OpLsh16x32:
-               return rewriteValueAMD64_OpLsh16x32(v, config)
-       case OpLsh16x64:
-               return rewriteValueAMD64_OpLsh16x64(v, config)
-       case OpLsh16x8:
-               return rewriteValueAMD64_OpLsh16x8(v, config)
-       case OpLsh32x16:
-               return rewriteValueAMD64_OpLsh32x16(v, config)
-       case OpLsh32x32:
-               return rewriteValueAMD64_OpLsh32x32(v, config)
-       case OpLsh32x64:
-               return rewriteValueAMD64_OpLsh32x64(v, config)
-       case OpLsh32x8:
-               return rewriteValueAMD64_OpLsh32x8(v, config)
-       case OpLsh64x16:
-               return rewriteValueAMD64_OpLsh64x16(v, config)
-       case OpLsh64x32:
-               return rewriteValueAMD64_OpLsh64x32(v, config)
-       case OpLsh64x64:
-               return rewriteValueAMD64_OpLsh64x64(v, config)
-       case OpLsh64x8:
-               return rewriteValueAMD64_OpLsh64x8(v, config)
-       case OpLsh8x16:
-               return rewriteValueAMD64_OpLsh8x16(v, config)
-       case OpLsh8x32:
-               return rewriteValueAMD64_OpLsh8x32(v, config)
-       case OpLsh8x64:
-               return rewriteValueAMD64_OpLsh8x64(v, config)
-       case OpLsh8x8:
-               return rewriteValueAMD64_OpLsh8x8(v, config)
        case OpAMD64MOVBQSX:
                return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
        case OpAMD64MOVBQSXload:
@@ -452,36 +176,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
                return rewriteValueAMD64_OpAMD64MULQ(v, config)
        case OpAMD64MULQconst:
                return rewriteValueAMD64_OpAMD64MULQconst(v, config)
-       case OpMod16:
-               return rewriteValueAMD64_OpMod16(v, config)
-       case OpMod16u:
-               return rewriteValueAMD64_OpMod16u(v, config)
-       case OpMod32:
-               return rewriteValueAMD64_OpMod32(v, config)
-       case OpMod32u:
-               return rewriteValueAMD64_OpMod32u(v, config)
-       case OpMod64:
-               return rewriteValueAMD64_OpMod64(v, config)
-       case OpMod64u:
-               return rewriteValueAMD64_OpMod64u(v, config)
-       case OpMod8:
-               return rewriteValueAMD64_OpMod8(v, config)
-       case OpMod8u:
-               return rewriteValueAMD64_OpMod8u(v, config)
-       case OpMove:
-               return rewriteValueAMD64_OpMove(v, config)
-       case OpMul16:
-               return rewriteValueAMD64_OpMul16(v, config)
-       case OpMul32:
-               return rewriteValueAMD64_OpMul32(v, config)
-       case OpMul32F:
-               return rewriteValueAMD64_OpMul32F(v, config)
-       case OpMul64:
-               return rewriteValueAMD64_OpMul64(v, config)
-       case OpMul64F:
-               return rewriteValueAMD64_OpMul64F(v, config)
-       case OpMul8:
-               return rewriteValueAMD64_OpMul8(v, config)
        case OpAMD64NEGL:
                return rewriteValueAMD64_OpAMD64NEGL(v, config)
        case OpAMD64NEGQ:
@@ -490,38 +184,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
                return rewriteValueAMD64_OpAMD64NOTL(v, config)
        case OpAMD64NOTQ:
                return rewriteValueAMD64_OpAMD64NOTQ(v, config)
-       case OpNeg16:
-               return rewriteValueAMD64_OpNeg16(v, config)
-       case OpNeg32:
-               return rewriteValueAMD64_OpNeg32(v, config)
-       case OpNeg32F:
-               return rewriteValueAMD64_OpNeg32F(v, config)
-       case OpNeg64:
-               return rewriteValueAMD64_OpNeg64(v, config)
-       case OpNeg64F:
-               return rewriteValueAMD64_OpNeg64F(v, config)
-       case OpNeg8:
-               return rewriteValueAMD64_OpNeg8(v, config)
-       case OpNeq16:
-               return rewriteValueAMD64_OpNeq16(v, config)
-       case OpNeq32:
-               return rewriteValueAMD64_OpNeq32(v, config)
-       case OpNeq32F:
-               return rewriteValueAMD64_OpNeq32F(v, config)
-       case OpNeq64:
-               return rewriteValueAMD64_OpNeq64(v, config)
-       case OpNeq64F:
-               return rewriteValueAMD64_OpNeq64F(v, config)
-       case OpNeq8:
-               return rewriteValueAMD64_OpNeq8(v, config)
-       case OpNeqB:
-               return rewriteValueAMD64_OpNeqB(v, config)
-       case OpNeqPtr:
-               return rewriteValueAMD64_OpNeqPtr(v, config)
-       case OpNilCheck:
-               return rewriteValueAMD64_OpNilCheck(v, config)
-       case OpNot:
-               return rewriteValueAMD64_OpNot(v, config)
        case OpAMD64ORL:
                return rewriteValueAMD64_OpAMD64ORL(v, config)
        case OpAMD64ORLconst:
@@ -530,18 +192,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
                return rewriteValueAMD64_OpAMD64ORQ(v, config)
        case OpAMD64ORQconst:
                return rewriteValueAMD64_OpAMD64ORQconst(v, config)
-       case OpOffPtr:
-               return rewriteValueAMD64_OpOffPtr(v, config)
-       case OpOr16:
-               return rewriteValueAMD64_OpOr16(v, config)
-       case OpOr32:
-               return rewriteValueAMD64_OpOr32(v, config)
-       case OpOr64:
-               return rewriteValueAMD64_OpOr64(v, config)
-       case OpOr8:
-               return rewriteValueAMD64_OpOr8(v, config)
-       case OpOrB:
-               return rewriteValueAMD64_OpOrB(v, config)
        case OpAMD64ROLBconst:
                return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
        case OpAMD64ROLLconst:
@@ -550,70 +200,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
                return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
        case OpAMD64ROLWconst:
                return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
-       case OpRsh16Ux16:
-               return rewriteValueAMD64_OpRsh16Ux16(v, config)
-       case OpRsh16Ux32:
-               return rewriteValueAMD64_OpRsh16Ux32(v, config)
-       case OpRsh16Ux64:
-               return rewriteValueAMD64_OpRsh16Ux64(v, config)
-       case OpRsh16Ux8:
-               return rewriteValueAMD64_OpRsh16Ux8(v, config)
-       case OpRsh16x16:
-               return rewriteValueAMD64_OpRsh16x16(v, config)
-       case OpRsh16x32:
-               return rewriteValueAMD64_OpRsh16x32(v, config)
-       case OpRsh16x64:
-               return rewriteValueAMD64_OpRsh16x64(v, config)
-       case OpRsh16x8:
-               return rewriteValueAMD64_OpRsh16x8(v, config)
-       case OpRsh32Ux16:
-               return rewriteValueAMD64_OpRsh32Ux16(v, config)
-       case OpRsh32Ux32:
-               return rewriteValueAMD64_OpRsh32Ux32(v, config)
-       case OpRsh32Ux64:
-               return rewriteValueAMD64_OpRsh32Ux64(v, config)
-       case OpRsh32Ux8:
-               return rewriteValueAMD64_OpRsh32Ux8(v, config)
-       case OpRsh32x16:
-               return rewriteValueAMD64_OpRsh32x16(v, config)
-       case OpRsh32x32:
-               return rewriteValueAMD64_OpRsh32x32(v, config)
-       case OpRsh32x64:
-               return rewriteValueAMD64_OpRsh32x64(v, config)
-       case OpRsh32x8:
-               return rewriteValueAMD64_OpRsh32x8(v, config)
-       case OpRsh64Ux16:
-               return rewriteValueAMD64_OpRsh64Ux16(v, config)
-       case OpRsh64Ux32:
-               return rewriteValueAMD64_OpRsh64Ux32(v, config)
-       case OpRsh64Ux64:
-               return rewriteValueAMD64_OpRsh64Ux64(v, config)
-       case OpRsh64Ux8:
-               return rewriteValueAMD64_OpRsh64Ux8(v, config)
-       case OpRsh64x16:
-               return rewriteValueAMD64_OpRsh64x16(v, config)
-       case OpRsh64x32:
-               return rewriteValueAMD64_OpRsh64x32(v, config)
-       case OpRsh64x64:
-               return rewriteValueAMD64_OpRsh64x64(v, config)
-       case OpRsh64x8:
-               return rewriteValueAMD64_OpRsh64x8(v, config)
-       case OpRsh8Ux16:
-               return rewriteValueAMD64_OpRsh8Ux16(v, config)
-       case OpRsh8Ux32:
-               return rewriteValueAMD64_OpRsh8Ux32(v, config)
-       case OpRsh8Ux64:
-               return rewriteValueAMD64_OpRsh8Ux64(v, config)
-       case OpRsh8Ux8:
-               return rewriteValueAMD64_OpRsh8Ux8(v, config)
-       case OpRsh8x16:
-               return rewriteValueAMD64_OpRsh8x16(v, config)
-       case OpRsh8x32:
-               return rewriteValueAMD64_OpRsh8x32(v, config)
-       case OpRsh8x64:
-               return rewriteValueAMD64_OpRsh8x64(v, config)
-       case OpRsh8x8:
-               return rewriteValueAMD64_OpRsh8x8(v, config)
        case OpAMD64SARB:
                return rewriteValueAMD64_OpAMD64SARB(v, config)
        case OpAMD64SARBconst:
@@ -674,50 +260,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
                return rewriteValueAMD64_OpAMD64SUBQ(v, config)
        case OpAMD64SUBQconst:
                return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
-       case OpSignExt16to32:
-               return rewriteValueAMD64_OpSignExt16to32(v, config)
-       case OpSignExt16to64:
-               return rewriteValueAMD64_OpSignExt16to64(v, config)
-       case OpSignExt32to64:
-               return rewriteValueAMD64_OpSignExt32to64(v, config)
-       case OpSignExt8to16:
-               return rewriteValueAMD64_OpSignExt8to16(v, config)
-       case OpSignExt8to32:
-               return rewriteValueAMD64_OpSignExt8to32(v, config)
-       case OpSignExt8to64:
-               return rewriteValueAMD64_OpSignExt8to64(v, config)
-       case OpSqrt:
-               return rewriteValueAMD64_OpSqrt(v, config)
-       case OpStaticCall:
-               return rewriteValueAMD64_OpStaticCall(v, config)
-       case OpStore:
-               return rewriteValueAMD64_OpStore(v, config)
-       case OpSub16:
-               return rewriteValueAMD64_OpSub16(v, config)
-       case OpSub32:
-               return rewriteValueAMD64_OpSub32(v, config)
-       case OpSub32F:
-               return rewriteValueAMD64_OpSub32F(v, config)
-       case OpSub64:
-               return rewriteValueAMD64_OpSub64(v, config)
-       case OpSub64F:
-               return rewriteValueAMD64_OpSub64F(v, config)
-       case OpSub8:
-               return rewriteValueAMD64_OpSub8(v, config)
-       case OpSubPtr:
-               return rewriteValueAMD64_OpSubPtr(v, config)
-       case OpTrunc16to8:
-               return rewriteValueAMD64_OpTrunc16to8(v, config)
-       case OpTrunc32to16:
-               return rewriteValueAMD64_OpTrunc32to16(v, config)
-       case OpTrunc32to8:
-               return rewriteValueAMD64_OpTrunc32to8(v, config)
-       case OpTrunc64to16:
-               return rewriteValueAMD64_OpTrunc64to16(v, config)
-       case OpTrunc64to32:
-               return rewriteValueAMD64_OpTrunc64to32(v, config)
-       case OpTrunc64to8:
-               return rewriteValueAMD64_OpTrunc64to8(v, config)
        case OpAMD64XORL:
                return rewriteValueAMD64_OpAMD64XORL(v, config)
        case OpAMD64XORLconst:
@@ -726,60 +268,518 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
                return rewriteValueAMD64_OpAMD64XORQ(v, config)
        case OpAMD64XORQconst:
                return rewriteValueAMD64_OpAMD64XORQconst(v, config)
-       case OpXor16:
-               return rewriteValueAMD64_OpXor16(v, config)
-       case OpXor32:
-               return rewriteValueAMD64_OpXor32(v, config)
-       case OpXor64:
-               return rewriteValueAMD64_OpXor64(v, config)
-       case OpXor8:
-               return rewriteValueAMD64_OpXor8(v, config)
-       case OpZero:
-               return rewriteValueAMD64_OpZero(v, config)
-       case OpZeroExt16to32:
-               return rewriteValueAMD64_OpZeroExt16to32(v, config)
-       case OpZeroExt16to64:
-               return rewriteValueAMD64_OpZeroExt16to64(v, config)
-       case OpZeroExt32to64:
-               return rewriteValueAMD64_OpZeroExt32to64(v, config)
-       case OpZeroExt8to16:
-               return rewriteValueAMD64_OpZeroExt8to16(v, config)
-       case OpZeroExt8to32:
-               return rewriteValueAMD64_OpZeroExt8to32(v, config)
-       case OpZeroExt8to64:
-               return rewriteValueAMD64_OpZeroExt8to64(v, config)
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ADDL x (MOVLconst [c]))
-       // cond:
-       // result: (ADDLconst [c] x)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64ADDLconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               return true
-       }
-       // match: (ADDL (MOVLconst [c]) x)
-       // cond:
-       // result: (ADDLconst [c] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpAMD64ADDLconst)
+       case OpAdd16:
+               return rewriteValueAMD64_OpAdd16(v, config)
+       case OpAdd32:
+               return rewriteValueAMD64_OpAdd32(v, config)
+       case OpAdd32F:
+               return rewriteValueAMD64_OpAdd32F(v, config)
+       case OpAdd64:
+               return rewriteValueAMD64_OpAdd64(v, config)
+       case OpAdd64F:
+               return rewriteValueAMD64_OpAdd64F(v, config)
+       case OpAdd8:
+               return rewriteValueAMD64_OpAdd8(v, config)
+       case OpAddPtr:
+               return rewriteValueAMD64_OpAddPtr(v, config)
+       case OpAddr:
+               return rewriteValueAMD64_OpAddr(v, config)
+       case OpAnd16:
+               return rewriteValueAMD64_OpAnd16(v, config)
+       case OpAnd32:
+               return rewriteValueAMD64_OpAnd32(v, config)
+       case OpAnd64:
+               return rewriteValueAMD64_OpAnd64(v, config)
+       case OpAnd8:
+               return rewriteValueAMD64_OpAnd8(v, config)
+       case OpAndB:
+               return rewriteValueAMD64_OpAndB(v, config)
+       case OpAvg64u:
+               return rewriteValueAMD64_OpAvg64u(v, config)
+       case OpBswap32:
+               return rewriteValueAMD64_OpBswap32(v, config)
+       case OpBswap64:
+               return rewriteValueAMD64_OpBswap64(v, config)
+       case OpClosureCall:
+               return rewriteValueAMD64_OpClosureCall(v, config)
+       case OpCom16:
+               return rewriteValueAMD64_OpCom16(v, config)
+       case OpCom32:
+               return rewriteValueAMD64_OpCom32(v, config)
+       case OpCom64:
+               return rewriteValueAMD64_OpCom64(v, config)
+       case OpCom8:
+               return rewriteValueAMD64_OpCom8(v, config)
+       case OpConst16:
+               return rewriteValueAMD64_OpConst16(v, config)
+       case OpConst32:
+               return rewriteValueAMD64_OpConst32(v, config)
+       case OpConst32F:
+               return rewriteValueAMD64_OpConst32F(v, config)
+       case OpConst64:
+               return rewriteValueAMD64_OpConst64(v, config)
+       case OpConst64F:
+               return rewriteValueAMD64_OpConst64F(v, config)
+       case OpConst8:
+               return rewriteValueAMD64_OpConst8(v, config)
+       case OpConstBool:
+               return rewriteValueAMD64_OpConstBool(v, config)
+       case OpConstNil:
+               return rewriteValueAMD64_OpConstNil(v, config)
+       case OpConvert:
+               return rewriteValueAMD64_OpConvert(v, config)
+       case OpCtz16:
+               return rewriteValueAMD64_OpCtz16(v, config)
+       case OpCtz32:
+               return rewriteValueAMD64_OpCtz32(v, config)
+       case OpCtz64:
+               return rewriteValueAMD64_OpCtz64(v, config)
+       case OpCvt32Fto32:
+               return rewriteValueAMD64_OpCvt32Fto32(v, config)
+       case OpCvt32Fto64:
+               return rewriteValueAMD64_OpCvt32Fto64(v, config)
+       case OpCvt32Fto64F:
+               return rewriteValueAMD64_OpCvt32Fto64F(v, config)
+       case OpCvt32to32F:
+               return rewriteValueAMD64_OpCvt32to32F(v, config)
+       case OpCvt32to64F:
+               return rewriteValueAMD64_OpCvt32to64F(v, config)
+       case OpCvt64Fto32:
+               return rewriteValueAMD64_OpCvt64Fto32(v, config)
+       case OpCvt64Fto32F:
+               return rewriteValueAMD64_OpCvt64Fto32F(v, config)
+       case OpCvt64Fto64:
+               return rewriteValueAMD64_OpCvt64Fto64(v, config)
+       case OpCvt64to32F:
+               return rewriteValueAMD64_OpCvt64to32F(v, config)
+       case OpCvt64to64F:
+               return rewriteValueAMD64_OpCvt64to64F(v, config)
+       case OpDeferCall:
+               return rewriteValueAMD64_OpDeferCall(v, config)
+       case OpDiv16:
+               return rewriteValueAMD64_OpDiv16(v, config)
+       case OpDiv16u:
+               return rewriteValueAMD64_OpDiv16u(v, config)
+       case OpDiv32:
+               return rewriteValueAMD64_OpDiv32(v, config)
+       case OpDiv32F:
+               return rewriteValueAMD64_OpDiv32F(v, config)
+       case OpDiv32u:
+               return rewriteValueAMD64_OpDiv32u(v, config)
+       case OpDiv64:
+               return rewriteValueAMD64_OpDiv64(v, config)
+       case OpDiv64F:
+               return rewriteValueAMD64_OpDiv64F(v, config)
+       case OpDiv64u:
+               return rewriteValueAMD64_OpDiv64u(v, config)
+       case OpDiv8:
+               return rewriteValueAMD64_OpDiv8(v, config)
+       case OpDiv8u:
+               return rewriteValueAMD64_OpDiv8u(v, config)
+       case OpEq16:
+               return rewriteValueAMD64_OpEq16(v, config)
+       case OpEq32:
+               return rewriteValueAMD64_OpEq32(v, config)
+       case OpEq32F:
+               return rewriteValueAMD64_OpEq32F(v, config)
+       case OpEq64:
+               return rewriteValueAMD64_OpEq64(v, config)
+       case OpEq64F:
+               return rewriteValueAMD64_OpEq64F(v, config)
+       case OpEq8:
+               return rewriteValueAMD64_OpEq8(v, config)
+       case OpEqB:
+               return rewriteValueAMD64_OpEqB(v, config)
+       case OpEqPtr:
+               return rewriteValueAMD64_OpEqPtr(v, config)
+       case OpGeq16:
+               return rewriteValueAMD64_OpGeq16(v, config)
+       case OpGeq16U:
+               return rewriteValueAMD64_OpGeq16U(v, config)
+       case OpGeq32:
+               return rewriteValueAMD64_OpGeq32(v, config)
+       case OpGeq32F:
+               return rewriteValueAMD64_OpGeq32F(v, config)
+       case OpGeq32U:
+               return rewriteValueAMD64_OpGeq32U(v, config)
+       case OpGeq64:
+               return rewriteValueAMD64_OpGeq64(v, config)
+       case OpGeq64F:
+               return rewriteValueAMD64_OpGeq64F(v, config)
+       case OpGeq64U:
+               return rewriteValueAMD64_OpGeq64U(v, config)
+       case OpGeq8:
+               return rewriteValueAMD64_OpGeq8(v, config)
+       case OpGeq8U:
+               return rewriteValueAMD64_OpGeq8U(v, config)
+       case OpGetClosurePtr:
+               return rewriteValueAMD64_OpGetClosurePtr(v, config)
+       case OpGetG:
+               return rewriteValueAMD64_OpGetG(v, config)
+       case OpGoCall:
+               return rewriteValueAMD64_OpGoCall(v, config)
+       case OpGreater16:
+               return rewriteValueAMD64_OpGreater16(v, config)
+       case OpGreater16U:
+               return rewriteValueAMD64_OpGreater16U(v, config)
+       case OpGreater32:
+               return rewriteValueAMD64_OpGreater32(v, config)
+       case OpGreater32F:
+               return rewriteValueAMD64_OpGreater32F(v, config)
+       case OpGreater32U:
+               return rewriteValueAMD64_OpGreater32U(v, config)
+       case OpGreater64:
+               return rewriteValueAMD64_OpGreater64(v, config)
+       case OpGreater64F:
+               return rewriteValueAMD64_OpGreater64F(v, config)
+       case OpGreater64U:
+               return rewriteValueAMD64_OpGreater64U(v, config)
+       case OpGreater8:
+               return rewriteValueAMD64_OpGreater8(v, config)
+       case OpGreater8U:
+               return rewriteValueAMD64_OpGreater8U(v, config)
+       case OpHmul16:
+               return rewriteValueAMD64_OpHmul16(v, config)
+       case OpHmul16u:
+               return rewriteValueAMD64_OpHmul16u(v, config)
+       case OpHmul32:
+               return rewriteValueAMD64_OpHmul32(v, config)
+       case OpHmul32u:
+               return rewriteValueAMD64_OpHmul32u(v, config)
+       case OpHmul64:
+               return rewriteValueAMD64_OpHmul64(v, config)
+       case OpHmul64u:
+               return rewriteValueAMD64_OpHmul64u(v, config)
+       case OpHmul8:
+               return rewriteValueAMD64_OpHmul8(v, config)
+       case OpHmul8u:
+               return rewriteValueAMD64_OpHmul8u(v, config)
+       case OpInterCall:
+               return rewriteValueAMD64_OpInterCall(v, config)
+       case OpIsInBounds:
+               return rewriteValueAMD64_OpIsInBounds(v, config)
+       case OpIsNonNil:
+               return rewriteValueAMD64_OpIsNonNil(v, config)
+       case OpIsSliceInBounds:
+               return rewriteValueAMD64_OpIsSliceInBounds(v, config)
+       case OpLeq16:
+               return rewriteValueAMD64_OpLeq16(v, config)
+       case OpLeq16U:
+               return rewriteValueAMD64_OpLeq16U(v, config)
+       case OpLeq32:
+               return rewriteValueAMD64_OpLeq32(v, config)
+       case OpLeq32F:
+               return rewriteValueAMD64_OpLeq32F(v, config)
+       case OpLeq32U:
+               return rewriteValueAMD64_OpLeq32U(v, config)
+       case OpLeq64:
+               return rewriteValueAMD64_OpLeq64(v, config)
+       case OpLeq64F:
+               return rewriteValueAMD64_OpLeq64F(v, config)
+       case OpLeq64U:
+               return rewriteValueAMD64_OpLeq64U(v, config)
+       case OpLeq8:
+               return rewriteValueAMD64_OpLeq8(v, config)
+       case OpLeq8U:
+               return rewriteValueAMD64_OpLeq8U(v, config)
+       case OpLess16:
+               return rewriteValueAMD64_OpLess16(v, config)
+       case OpLess16U:
+               return rewriteValueAMD64_OpLess16U(v, config)
+       case OpLess32:
+               return rewriteValueAMD64_OpLess32(v, config)
+       case OpLess32F:
+               return rewriteValueAMD64_OpLess32F(v, config)
+       case OpLess32U:
+               return rewriteValueAMD64_OpLess32U(v, config)
+       case OpLess64:
+               return rewriteValueAMD64_OpLess64(v, config)
+       case OpLess64F:
+               return rewriteValueAMD64_OpLess64F(v, config)
+       case OpLess64U:
+               return rewriteValueAMD64_OpLess64U(v, config)
+       case OpLess8:
+               return rewriteValueAMD64_OpLess8(v, config)
+       case OpLess8U:
+               return rewriteValueAMD64_OpLess8U(v, config)
+       case OpLoad:
+               return rewriteValueAMD64_OpLoad(v, config)
+       case OpLrot16:
+               return rewriteValueAMD64_OpLrot16(v, config)
+       case OpLrot32:
+               return rewriteValueAMD64_OpLrot32(v, config)
+       case OpLrot64:
+               return rewriteValueAMD64_OpLrot64(v, config)
+       case OpLrot8:
+               return rewriteValueAMD64_OpLrot8(v, config)
+       case OpLsh16x16:
+               return rewriteValueAMD64_OpLsh16x16(v, config)
+       case OpLsh16x32:
+               return rewriteValueAMD64_OpLsh16x32(v, config)
+       case OpLsh16x64:
+               return rewriteValueAMD64_OpLsh16x64(v, config)
+       case OpLsh16x8:
+               return rewriteValueAMD64_OpLsh16x8(v, config)
+       case OpLsh32x16:
+               return rewriteValueAMD64_OpLsh32x16(v, config)
+       case OpLsh32x32:
+               return rewriteValueAMD64_OpLsh32x32(v, config)
+       case OpLsh32x64:
+               return rewriteValueAMD64_OpLsh32x64(v, config)
+       case OpLsh32x8:
+               return rewriteValueAMD64_OpLsh32x8(v, config)
+       case OpLsh64x16:
+               return rewriteValueAMD64_OpLsh64x16(v, config)
+       case OpLsh64x32:
+               return rewriteValueAMD64_OpLsh64x32(v, config)
+       case OpLsh64x64:
+               return rewriteValueAMD64_OpLsh64x64(v, config)
+       case OpLsh64x8:
+               return rewriteValueAMD64_OpLsh64x8(v, config)
+       case OpLsh8x16:
+               return rewriteValueAMD64_OpLsh8x16(v, config)
+       case OpLsh8x32:
+               return rewriteValueAMD64_OpLsh8x32(v, config)
+       case OpLsh8x64:
+               return rewriteValueAMD64_OpLsh8x64(v, config)
+       case OpLsh8x8:
+               return rewriteValueAMD64_OpLsh8x8(v, config)
+       case OpMod16:
+               return rewriteValueAMD64_OpMod16(v, config)
+       case OpMod16u:
+               return rewriteValueAMD64_OpMod16u(v, config)
+       case OpMod32:
+               return rewriteValueAMD64_OpMod32(v, config)
+       case OpMod32u:
+               return rewriteValueAMD64_OpMod32u(v, config)
+       case OpMod64:
+               return rewriteValueAMD64_OpMod64(v, config)
+       case OpMod64u:
+               return rewriteValueAMD64_OpMod64u(v, config)
+       case OpMod8:
+               return rewriteValueAMD64_OpMod8(v, config)
+       case OpMod8u:
+               return rewriteValueAMD64_OpMod8u(v, config)
+       case OpMove:
+               return rewriteValueAMD64_OpMove(v, config)
+       case OpMul16:
+               return rewriteValueAMD64_OpMul16(v, config)
+       case OpMul32:
+               return rewriteValueAMD64_OpMul32(v, config)
+       case OpMul32F:
+               return rewriteValueAMD64_OpMul32F(v, config)
+       case OpMul64:
+               return rewriteValueAMD64_OpMul64(v, config)
+       case OpMul64F:
+               return rewriteValueAMD64_OpMul64F(v, config)
+       case OpMul8:
+               return rewriteValueAMD64_OpMul8(v, config)
+       case OpNeg16:
+               return rewriteValueAMD64_OpNeg16(v, config)
+       case OpNeg32:
+               return rewriteValueAMD64_OpNeg32(v, config)
+       case OpNeg32F:
+               return rewriteValueAMD64_OpNeg32F(v, config)
+       case OpNeg64:
+               return rewriteValueAMD64_OpNeg64(v, config)
+       case OpNeg64F:
+               return rewriteValueAMD64_OpNeg64F(v, config)
+       case OpNeg8:
+               return rewriteValueAMD64_OpNeg8(v, config)
+       case OpNeq16:
+               return rewriteValueAMD64_OpNeq16(v, config)
+       case OpNeq32:
+               return rewriteValueAMD64_OpNeq32(v, config)
+       case OpNeq32F:
+               return rewriteValueAMD64_OpNeq32F(v, config)
+       case OpNeq64:
+               return rewriteValueAMD64_OpNeq64(v, config)
+       case OpNeq64F:
+               return rewriteValueAMD64_OpNeq64F(v, config)
+       case OpNeq8:
+               return rewriteValueAMD64_OpNeq8(v, config)
+       case OpNeqB:
+               return rewriteValueAMD64_OpNeqB(v, config)
+       case OpNeqPtr:
+               return rewriteValueAMD64_OpNeqPtr(v, config)
+       case OpNilCheck:
+               return rewriteValueAMD64_OpNilCheck(v, config)
+       case OpNot:
+               return rewriteValueAMD64_OpNot(v, config)
+       case OpOffPtr:
+               return rewriteValueAMD64_OpOffPtr(v, config)
+       case OpOr16:
+               return rewriteValueAMD64_OpOr16(v, config)
+       case OpOr32:
+               return rewriteValueAMD64_OpOr32(v, config)
+       case OpOr64:
+               return rewriteValueAMD64_OpOr64(v, config)
+       case OpOr8:
+               return rewriteValueAMD64_OpOr8(v, config)
+       case OpOrB:
+               return rewriteValueAMD64_OpOrB(v, config)
+       case OpRsh16Ux16:
+               return rewriteValueAMD64_OpRsh16Ux16(v, config)
+       case OpRsh16Ux32:
+               return rewriteValueAMD64_OpRsh16Ux32(v, config)
+       case OpRsh16Ux64:
+               return rewriteValueAMD64_OpRsh16Ux64(v, config)
+       case OpRsh16Ux8:
+               return rewriteValueAMD64_OpRsh16Ux8(v, config)
+       case OpRsh16x16:
+               return rewriteValueAMD64_OpRsh16x16(v, config)
+       case OpRsh16x32:
+               return rewriteValueAMD64_OpRsh16x32(v, config)
+       case OpRsh16x64:
+               return rewriteValueAMD64_OpRsh16x64(v, config)
+       case OpRsh16x8:
+               return rewriteValueAMD64_OpRsh16x8(v, config)
+       case OpRsh32Ux16:
+               return rewriteValueAMD64_OpRsh32Ux16(v, config)
+       case OpRsh32Ux32:
+               return rewriteValueAMD64_OpRsh32Ux32(v, config)
+       case OpRsh32Ux64:
+               return rewriteValueAMD64_OpRsh32Ux64(v, config)
+       case OpRsh32Ux8:
+               return rewriteValueAMD64_OpRsh32Ux8(v, config)
+       case OpRsh32x16:
+               return rewriteValueAMD64_OpRsh32x16(v, config)
+       case OpRsh32x32:
+               return rewriteValueAMD64_OpRsh32x32(v, config)
+       case OpRsh32x64:
+               return rewriteValueAMD64_OpRsh32x64(v, config)
+       case OpRsh32x8:
+               return rewriteValueAMD64_OpRsh32x8(v, config)
+       case OpRsh64Ux16:
+               return rewriteValueAMD64_OpRsh64Ux16(v, config)
+       case OpRsh64Ux32:
+               return rewriteValueAMD64_OpRsh64Ux32(v, config)
+       case OpRsh64Ux64:
+               return rewriteValueAMD64_OpRsh64Ux64(v, config)
+       case OpRsh64Ux8:
+               return rewriteValueAMD64_OpRsh64Ux8(v, config)
+       case OpRsh64x16:
+               return rewriteValueAMD64_OpRsh64x16(v, config)
+       case OpRsh64x32:
+               return rewriteValueAMD64_OpRsh64x32(v, config)
+       case OpRsh64x64:
+               return rewriteValueAMD64_OpRsh64x64(v, config)
+       case OpRsh64x8:
+               return rewriteValueAMD64_OpRsh64x8(v, config)
+       case OpRsh8Ux16:
+               return rewriteValueAMD64_OpRsh8Ux16(v, config)
+       case OpRsh8Ux32:
+               return rewriteValueAMD64_OpRsh8Ux32(v, config)
+       case OpRsh8Ux64:
+               return rewriteValueAMD64_OpRsh8Ux64(v, config)
+       case OpRsh8Ux8:
+               return rewriteValueAMD64_OpRsh8Ux8(v, config)
+       case OpRsh8x16:
+               return rewriteValueAMD64_OpRsh8x16(v, config)
+       case OpRsh8x32:
+               return rewriteValueAMD64_OpRsh8x32(v, config)
+       case OpRsh8x64:
+               return rewriteValueAMD64_OpRsh8x64(v, config)
+       case OpRsh8x8:
+               return rewriteValueAMD64_OpRsh8x8(v, config)
+       case OpSignExt16to32:
+               return rewriteValueAMD64_OpSignExt16to32(v, config)
+       case OpSignExt16to64:
+               return rewriteValueAMD64_OpSignExt16to64(v, config)
+       case OpSignExt32to64:
+               return rewriteValueAMD64_OpSignExt32to64(v, config)
+       case OpSignExt8to16:
+               return rewriteValueAMD64_OpSignExt8to16(v, config)
+       case OpSignExt8to32:
+               return rewriteValueAMD64_OpSignExt8to32(v, config)
+       case OpSignExt8to64:
+               return rewriteValueAMD64_OpSignExt8to64(v, config)
+       case OpSqrt:
+               return rewriteValueAMD64_OpSqrt(v, config)
+       case OpStaticCall:
+               return rewriteValueAMD64_OpStaticCall(v, config)
+       case OpStore:
+               return rewriteValueAMD64_OpStore(v, config)
+       case OpSub16:
+               return rewriteValueAMD64_OpSub16(v, config)
+       case OpSub32:
+               return rewriteValueAMD64_OpSub32(v, config)
+       case OpSub32F:
+               return rewriteValueAMD64_OpSub32F(v, config)
+       case OpSub64:
+               return rewriteValueAMD64_OpSub64(v, config)
+       case OpSub64F:
+               return rewriteValueAMD64_OpSub64F(v, config)
+       case OpSub8:
+               return rewriteValueAMD64_OpSub8(v, config)
+       case OpSubPtr:
+               return rewriteValueAMD64_OpSubPtr(v, config)
+       case OpTrunc16to8:
+               return rewriteValueAMD64_OpTrunc16to8(v, config)
+       case OpTrunc32to16:
+               return rewriteValueAMD64_OpTrunc32to16(v, config)
+       case OpTrunc32to8:
+               return rewriteValueAMD64_OpTrunc32to8(v, config)
+       case OpTrunc64to16:
+               return rewriteValueAMD64_OpTrunc64to16(v, config)
+       case OpTrunc64to32:
+               return rewriteValueAMD64_OpTrunc64to32(v, config)
+       case OpTrunc64to8:
+               return rewriteValueAMD64_OpTrunc64to8(v, config)
+       case OpXor16:
+               return rewriteValueAMD64_OpXor16(v, config)
+       case OpXor32:
+               return rewriteValueAMD64_OpXor32(v, config)
+       case OpXor64:
+               return rewriteValueAMD64_OpXor64(v, config)
+       case OpXor8:
+               return rewriteValueAMD64_OpXor8(v, config)
+       case OpZero:
+               return rewriteValueAMD64_OpZero(v, config)
+       case OpZeroExt16to32:
+               return rewriteValueAMD64_OpZeroExt16to32(v, config)
+       case OpZeroExt16to64:
+               return rewriteValueAMD64_OpZeroExt16to64(v, config)
+       case OpZeroExt32to64:
+               return rewriteValueAMD64_OpZeroExt32to64(v, config)
+       case OpZeroExt8to16:
+               return rewriteValueAMD64_OpZeroExt8to16(v, config)
+       case OpZeroExt8to32:
+               return rewriteValueAMD64_OpZeroExt8to32(v, config)
+       case OpZeroExt8to64:
+               return rewriteValueAMD64_OpZeroExt8to64(v, config)
+       }
+       return false
+}
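
Note that in the regenerated dispatch above, the OpAMD64-prefixed cases sort ahead of the generic Op* cases: the switch is emitted in lexical order of the full opcode name, architecture prefix included. A minimal, self-contained illustration of that ordering (the ops slice here is hypothetical, not rulegen's internal state):

	package main

	import (
		"fmt"
		"sort"
	)

	func main() {
		// Case clauses are emitted in lexical order of the full name,
		// so "OpAMD64..." (uppercase M) sorts before generic "OpAdd...".
		ops := []string{"OpAdd16", "OpAMD64ADDL", "OpZero", "OpAMD64XORQconst"}
		sort.Strings(ops)
		fmt.Println(ops) // [OpAMD64ADDL OpAMD64XORQconst OpAdd16 OpZero]
	}
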
+func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ADDL x (MOVLconst [c]))
+       // cond:
+       // result: (ADDLconst [c] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpAMD64ADDLconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (ADDL (MOVLconst [c]) x)
+       // cond:
+       // result: (ADDLconst [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpAMD64ADDLconst)
                v.AuxInt = c
                v.AddArg(x)
                return true
@@ -1531,708 +1531,929 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool {
        }
        return false
 }
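
Every generated function in this file follows the same in-place rewrite protocol: match on v.Op, v.AuxInt, and v.Args, then reset v to the new opcode and rebuild AuxInt and the argument list. A simplified sketch of that protocol, with a hypothetical, stripped-down Value type (the real one in cmd/compile/internal/ssa carries Type, Aux, Block, Line, and more, and its reset does extra bookkeeping):

	package sketch

	// Op stands in for the real ssa opcode enum.
	type Op int32

	// Value is a stripped-down stand-in for ssa.Value.
	type Value struct {
		Op     Op
		AuxInt int64
		Args   []*Value
	}

	// reset reuses v in place as op, clearing AuxInt and the argument
	// list; the generated rewrites then repopulate both.
	func (v *Value) reset(op Op) {
		v.Op = op
		v.AuxInt = 0
		v.Args = v.Args[:0]
	}

	// AddArg appends one argument, in the order the result pattern
	// lists them.
	func (v *Value) AddArg(w *Value) {
		v.Args = append(v.Args, w)
	}
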
-func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMOVLEQconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add16  x y)
+       // match: (CMOVLEQconst x (InvertFlags y) [c])
        // cond:
-       // result: (ADDL  x y)
+       // result: (CMOVLNEconst x y [c])
        for {
+               c := v.AuxInt
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ADDL)
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64InvertFlags {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpAMD64CMOVLNEconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+       // match: (CMOVLEQconst _ (FlagEQ) [c])
+       // cond:
+       // result: (Const32 [c])
+       for {
+               c := v.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagEQ {
+                       break
+               }
+               v.reset(OpConst32)
+               v.AuxInt = c
+               return true
+       }
+       // match: (CMOVLEQconst x (FlagLT_ULT))
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagLT_ULT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (CMOVLEQconst x (FlagLT_UGT))
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagLT_UGT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (CMOVLEQconst x (FlagGT_ULT))
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (CMOVLEQconst x (FlagGT_UGT))
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQEQconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (CMOVQEQconst x (InvertFlags y) [c])
+       // cond:
+       // result: (CMOVQNEconst x y [c])
+       for {
+               c := v.AuxInt
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64InvertFlags {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpAMD64CMOVQNEconst)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-}
-func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32  x y)
+       // match: (CMOVQEQconst _ (FlagEQ) [c])
        // cond:
-       // result: (ADDL  x y)
+       // result: (Const64 [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ADDL)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagEQ {
+                       break
+               }
+               v.reset(OpConst64)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32F x y)
+       // match: (CMOVQEQconst x (FlagLT_ULT))
        // cond:
-       // result: (ADDSS x y)
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ADDSS)
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagLT_ULT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add64  x y)
+       // match: (CMOVQEQconst x (FlagLT_UGT))
        // cond:
-       // result: (ADDQ  x y)
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ADDQ)
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagLT_UGT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add64F x y)
+       // match: (CMOVQEQconst x (FlagGT_ULT))
        // cond:
-       // result: (ADDSD x y)
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ADDSD)
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add8   x y)
+       // match: (CMOVQEQconst x (FlagGT_UGT))
        // cond:
-       // result: (ADDL  x y)
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ADDL)
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
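
The three CMOV*EQconst functions encode two facts: an InvertFlags feeding the cmov flips EQ to NE, and a statically known flag value makes the select disappear, either into the constant (FlagEQ) or into x (the four inequality flags, via OpCopy). An illustrative model of that select, with flag names as strings purely for exposition:

	package sketch

	// cmovEQconst models the select: the result is the constant c
	// exactly when the flags say "equal", otherwise x. With a
	// statically known flag argument the select folds away, which is
	// what the rules above do. Hypothetical helper, not compiler code.
	func cmovEQconst(flags string, x, c int64) int64 {
		if flags == "FlagEQ" {
			return c // folds to (Const64 [c]) and friends
		}
		// FlagLT_ULT, FlagLT_UGT, FlagGT_ULT, FlagGT_UGT: the EQ
		// condition is statically false, so the cmov is a copy of x.
		return x
	}
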
-func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMOVWEQconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (AddPtr x y)
+       // match: (CMOVWEQconst x (InvertFlags y) [c])
        // cond:
-       // result: (ADDQ  x y)
+       // result: (CMOVWNEconst x y [c])
        for {
+               c := v.AuxInt
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ADDQ)
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64InvertFlags {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpAMD64CMOVWNEconst)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-}
-func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Addr {sym} base)
+       // match: (CMOVWEQconst _ (FlagEQ) [c])
        // cond:
-       // result: (LEAQ {sym} base)
+       // result: (Const16 [c])
        for {
-               sym := v.Aux
-               base := v.Args[0]
-               v.reset(OpAMD64LEAQ)
-               v.Aux = sym
-               v.AddArg(base)
+               c := v.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagEQ {
+                       break
+               }
+               v.reset(OpConst16)
+               v.AuxInt = c
                return true
        }
-}
-func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And16 x y)
+       // match: (CMOVWEQconst x (FlagLT_ULT))
        // cond:
-       // result: (ANDL x y)
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagLT_ULT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And32 x y)
+       // match: (CMOVWEQconst x (FlagLT_UGT))
        // cond:
-       // result: (ANDL x y)
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagLT_UGT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And64 x y)
+       // match: (CMOVWEQconst x (FlagGT_ULT))
        // cond:
-       // result: (ANDQ x y)
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDQ)
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And8  x y)
+       // match: (CMOVWEQconst x (FlagGT_UGT))
        // cond:
-       // result: (ANDL x y)
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (AndB x y)
+       // match: (CMPB x (MOVLconst [c]))
        // cond:
-       // result: (ANDL x y)
+       // result: (CMPBconst x [int64(int8(c))])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpAMD64CMPBconst)
+               v.AuxInt = int64(int8(c))
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       // match: (CMPB (MOVLconst [c]) x)
+       // cond:
+       // result: (InvertFlags (CMPBconst x [int64(int8(c))]))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpAMD64InvertFlags)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v0.AuxInt = int64(int8(c))
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       return false
 }
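
CMPB keeps constants on the right: when the constant is the first operand, the compare is swapped into CMPBconst and wrapped in InvertFlags, because swapping compare operands exchanges less-than and greater-than (in both the signed and unsigned senses) while leaving equality alone. A sketch of that exchange over the five flag constants (illustrative helper, not compiler code):

	package sketch

	// invertFlags models OpAMD64InvertFlags over the five flag
	// constants: operand swap turns LT into GT and ULT into UGT
	// independently, and fixes EQ.
	func invertFlags(f string) string {
		switch f {
		case "FlagLT_ULT":
			return "FlagGT_UGT"
		case "FlagLT_UGT":
			return "FlagGT_ULT"
		case "FlagGT_ULT":
			return "FlagLT_UGT"
		case "FlagGT_UGT":
			return "FlagLT_ULT"
		default: // FlagEQ
			return f
		}
	}
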
-func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Avg64u x y)
-       // cond:
-       // result: (AVGQU x y)
+       // match: (CMPBconst (MOVLconst [x]) [y])
+       // cond: int8(x)==int8(y)
+       // result: (FlagEQ)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64AVGQU)
-               v.AddArg(x)
-               v.AddArg(y)
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
+                       break
+               }
+               x := v_0.AuxInt
+               if !(int8(x) == int8(y)) {
+                       break
+               }
+               v.reset(OpAMD64FlagEQ)
                return true
        }
-}
-func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Bswap32 x)
-       // cond:
-       // result: (BSWAPL x)
+       // match: (CMPBconst (MOVLconst [x]) [y])
+       // cond: int8(x)<int8(y) && uint8(x)<uint8(y)
+       // result: (FlagLT_ULT)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64BSWAPL)
-               v.AddArg(x)
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
+                       break
+               }
+               x := v_0.AuxInt
+               if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
+                       break
+               }
+               v.reset(OpAMD64FlagLT_ULT)
                return true
        }
-}
-func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Bswap64 x)
-       // cond:
-       // result: (BSWAPQ x)
+       // match: (CMPBconst (MOVLconst [x]) [y])
+       // cond: int8(x)<int8(y) && uint8(x)>uint8(y)
+       // result: (FlagLT_UGT)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64BSWAPQ)
-               v.AddArg(x)
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
+                       break
+               }
+               x := v_0.AuxInt
+               if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
+                       break
+               }
+               v.reset(OpAMD64FlagLT_UGT)
                return true
        }
-}
-func rewriteValueAMD64_OpAMD64CMOVLEQconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (CMOVLEQconst x (InvertFlags y) [c])
-       // cond:
-       // result: (CMOVLNEconst x y [c])
+       // match: (CMPBconst (MOVLconst [x]) [y])
+       // cond: int8(x)>int8(y) && uint8(x)<uint8(y)
+       // result: (FlagGT_ULT)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64InvertFlags {
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
                        break
                }
-               y := v_1.Args[0]
-               c := v.AuxInt
-               v.reset(OpAMD64CMOVLNEconst)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               x := v_0.AuxInt
+               if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
+                       break
+               }
+               v.reset(OpAMD64FlagGT_ULT)
                return true
        }
-       // match: (CMOVLEQconst _ (FlagEQ) [c])
-       // cond:
-       // result: (Const32 [c])
+       // match: (CMPBconst (MOVLconst [x]) [y])
+       // cond: int8(x)>int8(y) && uint8(x)>uint8(y)
+       // result: (FlagGT_UGT)
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagEQ {
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
                        break
                }
-               c := v.AuxInt
-               v.reset(OpConst32)
-               v.AuxInt = c
+               x := v_0.AuxInt
+               if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
+                       break
+               }
+               v.reset(OpAMD64FlagGT_UGT)
                return true
        }
-       // match: (CMOVLEQconst x (FlagLT_ULT))
-       // cond:
-       // result: x
+       // match: (CMPBconst (ANDLconst _ [m]) [n])
+       // cond: 0 <= int8(m) && int8(m) < int8(n)
+       // result: (FlagLT_ULT)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagLT_ULT {
+               n := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ANDLconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               m := v_0.AuxInt
+               if !(0 <= int8(m) && int8(m) < int8(n)) {
+                       break
+               }
+               v.reset(OpAMD64FlagLT_ULT)
                return true
        }
-       // match: (CMOVLEQconst x (FlagLT_UGT))
+       // match: (CMPBconst (ANDL x y) [0])
        // cond:
-       // result: x
+       // result: (TESTB x y)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagLT_UGT {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ANDL {
+                       break
+               }
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               v.reset(OpAMD64TESTB)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMOVLEQconst x (FlagGT_ULT))
+       // match: (CMPBconst (ANDLconst [c] x) [0])
        // cond:
-       // result: x
+       // result: (TESTBconst [int64(int8(c))] x)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagGT_ULT {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ANDLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpAMD64TESTBconst)
+               v.AuxInt = int64(int8(c))
                v.AddArg(x)
                return true
        }
-       // match: (CMOVLEQconst x (FlagGT_UGT))
+       // match: (CMPBconst x [0])
        // cond:
-       // result: x
+       // result: (TESTB x x)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagGT_UGT {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               x := v.Args[0]
+               v.reset(OpAMD64TESTB)
+               v.AddArg(x)
                v.AddArg(x)
                return true
        }
        return false
 }
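
When both CMPBconst operands are constants, the compare folds to one of five flag constants, chosen by the signed and unsigned orderings together; note the int64(int8(...)) conversions that truncate to byte width first. The selection logic, restated as a standalone helper (illustrative only, mirroring the rule conditions above):

	package sketch

	// flagConst restates the five CMPBconst constant-fold conditions.
	// The cases are exhaustive: equal unsigned bytes are equal bytes,
	// so x != y forces one of the four ordering combinations.
	func flagConst(x, y int8) string {
		ux, uy := uint8(x), uint8(y)
		switch {
		case x == y:
			return "FlagEQ"
		case x < y && ux < uy:
			return "FlagLT_ULT"
		case x < y && ux > uy:
			return "FlagLT_UGT"
		case x > y && ux < uy:
			return "FlagGT_ULT"
		default: // x > y && ux > uy
			return "FlagGT_UGT"
		}
	}
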
-func rewriteValueAMD64_OpAMD64CMOVQEQconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (CMOVQEQconst x (InvertFlags y) [c])
+       // match: (CMPL x (MOVLconst [c]))
        // cond:
-       // result: (CMOVQNEconst x y [c])
+       // result: (CMPLconst x [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64InvertFlags {
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               y := v_1.Args[0]
-               c := v.AuxInt
-               v.reset(OpAMD64CMOVQNEconst)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v_1.AuxInt
+               v.reset(OpAMD64CMPLconst)
                v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (CMOVQEQconst _ (FlagEQ) [c])
+       // match: (CMPL (MOVLconst [c]) x)
        // cond:
-       // result: (Const64 [c])
+       // result: (InvertFlags (CMPLconst x [c]))
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagEQ {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
                        break
                }
-               c := v.AuxInt
-               v.reset(OpConst64)
-               v.AuxInt = c
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpAMD64InvertFlags)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v0.AuxInt = c
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (CMOVQEQconst x (FlagLT_ULT))
-       // cond:
-       // result: x
+       return false
+}
+func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (CMPLconst (MOVLconst [x]) [y])
+       // cond: int32(x)==int32(y)
+       // result: (FlagEQ)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagLT_ULT {
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               x := v_0.AuxInt
+               if !(int32(x) == int32(y)) {
+                       break
+               }
+               v.reset(OpAMD64FlagEQ)
                return true
        }
-       // match: (CMOVQEQconst x (FlagLT_UGT))
-       // cond:
-       // result: x
+       // match: (CMPLconst (MOVLconst [x]) [y])
+       // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
+       // result: (FlagLT_ULT)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagLT_UGT {
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               x := v_0.AuxInt
+               if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
+                       break
+               }
+               v.reset(OpAMD64FlagLT_ULT)
                return true
        }
-       // match: (CMOVQEQconst x (FlagGT_ULT))
-       // cond:
-       // result: x
+       // match: (CMPLconst (MOVLconst [x]) [y])
+       // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
+       // result: (FlagLT_UGT)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagGT_ULT {
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
+                       break
+               }
+               x := v_0.AuxInt
+               if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               v.reset(OpAMD64FlagLT_UGT)
                return true
        }
-       // match: (CMOVQEQconst x (FlagGT_UGT))
-       // cond:
-       // result: x
+       // match: (CMPLconst (MOVLconst [x]) [y])
+       // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
+       // result: (FlagGT_ULT)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagGT_UGT {
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               x := v_0.AuxInt
+               if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+                       break
+               }
+               v.reset(OpAMD64FlagGT_ULT)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64CMOVWEQconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (CMOVWEQconst x (InvertFlags y) [c])
-       // cond:
-       // result: (CMOVWNEconst x y [c])
+       // match: (CMPLconst (MOVLconst [x]) [y])
+       // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
+       // result: (FlagGT_UGT)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64InvertFlags {
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
                        break
                }
-               y := v_1.Args[0]
-               c := v.AuxInt
-               v.reset(OpAMD64CMOVWNEconst)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               x := v_0.AuxInt
+               if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+                       break
+               }
+               v.reset(OpAMD64FlagGT_UGT)
                return true
        }
-       // match: (CMOVWEQconst _ (FlagEQ) [c])
-       // cond:
-       // result: (Const16 [c])
+       // match: (CMPLconst (SHRLconst _ [c]) [n])
+       // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
+       // result: (FlagLT_ULT)
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagEQ {
+               n := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64SHRLconst {
                        break
                }
-               c := v.AuxInt
-               v.reset(OpConst16)
-               v.AuxInt = c
+               c := v_0.AuxInt
+               if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
+                       break
+               }
+               v.reset(OpAMD64FlagLT_ULT)
                return true
        }
-       // match: (CMOVWEQconst x (FlagLT_ULT))
-       // cond:
-       // result: x
+       // match: (CMPLconst (ANDLconst _ [m]) [n])
+       // cond: 0 <= int32(m) && int32(m) < int32(n)
+       // result: (FlagLT_ULT)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagLT_ULT {
+               n := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ANDLconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               m := v_0.AuxInt
+               if !(0 <= int32(m) && int32(m) < int32(n)) {
+                       break
+               }
+               v.reset(OpAMD64FlagLT_ULT)
                return true
        }
-       // match: (CMOVWEQconst x (FlagLT_UGT))
+       // match: (CMPLconst (ANDL x y) [0])
        // cond:
-       // result: x
+       // result: (TESTL x y)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagLT_UGT {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ANDL {
+                       break
+               }
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               v.reset(OpAMD64TESTL)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMOVWEQconst x (FlagGT_ULT))
+       // match: (CMPLconst (ANDLconst [c] x) [0])
        // cond:
-       // result: x
+       // result: (TESTLconst [c] x)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagGT_ULT {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ANDLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpAMD64TESTLconst)
+               v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (CMOVWEQconst x (FlagGT_UGT))
+       // match: (CMPLconst x [0])
        // cond:
-       // result: x
+       // result: (TESTL x x)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64FlagGT_UGT {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               x := v.Args[0]
+               v.reset(OpAMD64TESTL)
+               v.AddArg(x)
                v.AddArg(x)
                return true
        }
        return false
 }
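
The CMPLconst rules above all reduce a compare whose operand is already known: when both sides are constants, the signed and unsigned orderings together pick one of five flag constants; when the operand is an AND or the immediate is zero, the compare becomes the cheaper TEST form, which sets the same flags without an immediate operand. A minimal standalone sketch of the constant-fold decision (names here are illustrative, not the ssa package's):

    package main

    import "fmt"

    type flag string

    const (
        flagEQ     flag = "FlagEQ"
        flagLT_ULT flag = "FlagLT_ULT"
        flagLT_UGT flag = "FlagLT_UGT"
        flagGT_ULT flag = "FlagGT_ULT"
        flagGT_UGT flag = "FlagGT_UGT"
    )

    // foldCMPLconst mirrors the five (CMPLconst (MOVLconst [x]) [y]) rules:
    // the signed order picks EQ/LT/GT and the unsigned order picks ULT/UGT.
    func foldCMPLconst(x, y int64) flag {
        sx, sy := int32(x), int32(y)
        ux, uy := uint32(x), uint32(y)
        switch {
        case sx == sy:
            return flagEQ
        case sx < sy && ux < uy:
            return flagLT_ULT
        case sx < sy: // ux > uy
            return flagLT_UGT
        case ux < uy: // sx > sy
            return flagGT_ULT
        default: // sx > sy && ux > uy
            return flagGT_UGT
        }
    }

    func main() {
        fmt.Println(foldCMPLconst(-1, 1)) // FlagLT_UGT: signed -1 < 1, unsigned 0xFFFFFFFF > 1
    }
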
-func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (CMPB x (MOVLconst [c]))
-       // cond:
-       // result: (CMPBconst x [int64(int8(c))])
+       // match: (CMPQ x (MOVQconst [c]))
+       // cond: is32Bit(c)
+       // result: (CMPQconst x [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
                c := v_1.AuxInt
-               v.reset(OpAMD64CMPBconst)
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpAMD64CMPQconst)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AuxInt = int64(int8(c))
                return true
        }
-       // match: (CMPB (MOVLconst [c]) x)
-       // cond:
-       // result: (InvertFlags (CMPBconst x [int64(int8(c))]))
+       // match: (CMPQ (MOVQconst [c]) x)
+       // cond: is32Bit(c)
+       // result: (InvertFlags (CMPQconst x [c]))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
+               if !(is32Bit(c)) {
+                       break
+               }
                v.reset(OpAMD64InvertFlags)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v0.AuxInt = c
                v0.AddArg(x)
-               v0.AuxInt = int64(int8(c))
                v.AddArg(v0)
                return true
        }
        return false
 }
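
Unlike the CMPB/CMPL/CMPW rules, the CMPQ rules carry an is32Bit condition: AMD64 compare instructions sign-extend a 32-bit immediate to 64 bits, so a MOVQconst can only fold into CMPQconst when the constant survives that round trip. The check is equivalent to the following sketch (the real helper lives with the other rewrite support in the ssa package):

    package main

    import "fmt"

    // is32Bit reports whether n can be represented as a sign-extended
    // 32-bit immediate, i.e. it round-trips through int32 unchanged.
    func is32Bit(n int64) bool { return n == int64(int32(n)) }

    func main() {
        fmt.Println(is32Bit(1<<31-1), is32Bit(1<<31)) // true false
    }
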
-func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (CMPBconst (MOVLconst [x]) [y])
-       // cond: int8(x)==int8(y)
+       // match: (CMPQconst (MOVQconst [x]) [y])
+       // cond: x==y
        // result: (FlagEQ)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int8(x) == int8(y)) {
+               if !(x == y) {
                        break
                }
                v.reset(OpAMD64FlagEQ)
                return true
        }
-       // match: (CMPBconst (MOVLconst [x]) [y])
-       // cond: int8(x)<int8(y) && uint8(x)<uint8(y)
+       // match: (CMPQconst (MOVQconst [x]) [y])
+       // cond: x<y && uint64(x)<uint64(y)
        // result: (FlagLT_ULT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
+               if !(x < y && uint64(x) < uint64(y)) {
                        break
                }
                v.reset(OpAMD64FlagLT_ULT)
                return true
        }
-       // match: (CMPBconst (MOVLconst [x]) [y])
-       // cond: int8(x)<int8(y) && uint8(x)>uint8(y)
+       // match: (CMPQconst (MOVQconst [x]) [y])
+       // cond: x<y && uint64(x)>uint64(y)
        // result: (FlagLT_UGT)
        for {
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVQconst {
+                       break
+               }
+               x := v_0.AuxInt
+               if !(x < y && uint64(x) > uint64(y)) {
+                       break
+               }
+               v.reset(OpAMD64FlagLT_UGT)
+               return true
+       }
+       // match: (CMPQconst (MOVQconst [x]) [y])
+       // cond: x>y && uint64(x)<uint64(y)
+       // result: (FlagGT_ULT)
+       for {
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVQconst {
+                       break
+               }
+               x := v_0.AuxInt
+               if !(x > y && uint64(x) < uint64(y)) {
+                       break
+               }
+               v.reset(OpAMD64FlagGT_ULT)
+               return true
+       }
+       // match: (CMPQconst (MOVQconst [x]) [y])
+       // cond: x>y && uint64(x)>uint64(y)
+       // result: (FlagGT_UGT)
+       for {
+               y := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVQconst {
+                       break
+               }
+               x := v_0.AuxInt
+               if !(x > y && uint64(x) > uint64(y)) {
+                       break
+               }
+               v.reset(OpAMD64FlagGT_UGT)
+               return true
+       }
+       // match: (CMPQconst (MOVBQZX _) [c])
+       // cond: 0xFF < c
+       // result: (FlagLT_ULT)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVBQZX {
+                       break
+               }
+               if !(0xFF < c) {
+                       break
+               }
+               v.reset(OpAMD64FlagLT_ULT)
+               return true
+       }
+       // match: (CMPQconst (MOVWQZX _) [c])
+       // cond: 0xFFFF < c
+       // result: (FlagLT_ULT)
+       for {
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               if v_0.Op != OpAMD64MOVWQZX {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
+               if !(0xFFFF < c) {
                        break
                }
-               v.reset(OpAMD64FlagLT_UGT)
+               v.reset(OpAMD64FlagLT_ULT)
                return true
        }
-       // match: (CMPBconst (MOVLconst [x]) [y])
-       // cond: int8(x)>int8(y) && uint8(x)<uint8(y)
-       // result: (FlagGT_ULT)
+       // match: (CMPQconst (MOVLQZX _) [c])
+       // cond: 0xFFFFFFFF < c
+       // result: (FlagLT_ULT)
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               if v_0.Op != OpAMD64MOVLQZX {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
+               if !(0xFFFFFFFF < c) {
                        break
                }
-               v.reset(OpAMD64FlagGT_ULT)
+               v.reset(OpAMD64FlagLT_ULT)
                return true
        }
-       // match: (CMPBconst (MOVLconst [x]) [y])
-       // cond: int8(x)>int8(y) && uint8(x)>uint8(y)
-       // result: (FlagGT_UGT)
+       // match: (CMPQconst (SHRQconst _ [c]) [n])
+       // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
+       // result: (FlagLT_ULT)
        for {
+               n := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               if v_0.Op != OpAMD64SHRQconst {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
+               c := v_0.AuxInt
+               if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
                        break
                }
-               v.reset(OpAMD64FlagGT_UGT)
+               v.reset(OpAMD64FlagLT_ULT)
                return true
        }
-       // match: (CMPBconst (ANDLconst _ [m]) [n])
-       // cond: 0 <= int8(m) && int8(m) < int8(n)
+       // match: (CMPQconst (ANDQconst _ [m]) [n])
+       // cond: 0 <= m && m < n
        // result: (FlagLT_ULT)
        for {
+               n := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDLconst {
+               if v_0.Op != OpAMD64ANDQconst {
                        break
                }
                m := v_0.AuxInt
-               n := v.AuxInt
-               if !(0 <= int8(m) && int8(m) < int8(n)) {
+               if !(0 <= m && m < n) {
                        break
                }
                v.reset(OpAMD64FlagLT_ULT)
                return true
        }
-       // match: (CMPBconst (ANDL x y) [0])
+       // match: (CMPQconst (ANDQ x y) [0])
        // cond:
-       // result: (TESTB x y)
+       // result: (TESTQ x y)
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDL {
+               if v_0.Op != OpAMD64ANDQ {
                        break
                }
                x := v_0.Args[0]
                y := v_0.Args[1]
-               if v.AuxInt != 0 {
-                       break
-               }
-               v.reset(OpAMD64TESTB)
+               v.reset(OpAMD64TESTQ)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-       // match: (CMPBconst (ANDLconst [c] x) [0])
+       // match: (CMPQconst (ANDQconst [c] x) [0])
        // cond:
-       // result: (TESTBconst [int64(int8(c))] x)
+       // result: (TESTQconst [c] x)
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDLconst {
+               if v_0.Op != OpAMD64ANDQconst {
                        break
                }
                c := v_0.AuxInt
                x := v_0.Args[0]
-               if v.AuxInt != 0 {
-                       break
-               }
-               v.reset(OpAMD64TESTBconst)
-               v.AuxInt = int64(int8(c))
+               v.reset(OpAMD64TESTQconst)
+               v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (CMPBconst x [0])
+       // match: (CMPQconst x [0])
        // cond:
-       // result: (TESTB x x)
+       // result: (TESTQ x x)
        for {
-               x := v.Args[0]
                if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpAMD64TESTB)
+               x := v.Args[0]
+               v.reset(OpAMD64TESTQ)
                v.AddArg(x)
                v.AddArg(x)
                return true
        }
        return false
 }
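
Several of the new CMPQconst rules exploit static bounds: a value produced by MOVBQZX/MOVWQZX/MOVLQZX can never exceed 0xFF/0xFFFF/0xFFFFFFFF, and one produced by SHRQconst [c] is below 1<<(64-c), so comparing against any constant at or past the bound always yields FlagLT_ULT (signed less-than holds too, since both sides are nonnegative under the rules' conditions). A small demonstration of the two bounds, assuming nothing beyond ordinary integer arithmetic:

    package main

    import "fmt"

    func main() {
        // The MOVBQZX/MOVWQZX/MOVLQZX rules: a zero-extended k-bit value
        // is at most 2^k-1, so comparing it with any larger constant is
        // statically FlagLT_ULT.
        var b uint8 = 0xFF
        fmt.Println(uint64(b) <= 0xFF) // true for every byte value

        // The SHRQconst rule: shifting right by c leaves a value below
        // 1<<(64-c), so any n with 1<<(64-c) <= n is statically larger.
        const c = 48
        x := ^uint64(0)
        fmt.Println(x>>c < 1<<(64-c)) // true for every x
    }
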
-func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (CMPL x (MOVLconst [c]))
+       // match: (CMPW x (MOVLconst [c]))
        // cond:
-       // result: (CMPLconst x [c])
+       // result: (CMPWconst x [int64(int16(c))])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -2240,14 +2461,14 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
                        break
                }
                c := v_1.AuxInt
-               v.reset(OpAMD64CMPLconst)
+               v.reset(OpAMD64CMPWconst)
+               v.AuxInt = int64(int16(c))
                v.AddArg(x)
-               v.AuxInt = c
                return true
        }
-       // match: (CMPL (MOVLconst [c]) x)
+       // match: (CMPW (MOVLconst [c]) x)
        // cond:
-       // result: (InvertFlags (CMPLconst x [c]))
+       // result: (InvertFlags (CMPWconst x [int64(int16(c))]))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpAMD64MOVLconst {
@@ -2256,2206 +2477,3514 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
                c := v_0.AuxInt
                x := v.Args[1]
                v.reset(OpAMD64InvertFlags)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v0.AuxInt = int64(int16(c))
                v0.AddArg(x)
-               v0.AuxInt = c
                v.AddArg(v0)
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (CMPLconst (MOVLconst [x]) [y])
-       // cond: int32(x)==int32(y)
+       // match: (CMPWconst (MOVLconst [x]) [y])
+       // cond: int16(x)==int16(y)
        // result: (FlagEQ)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpAMD64MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int32(x) == int32(y)) {
+               if !(int16(x) == int16(y)) {
                        break
                }
                v.reset(OpAMD64FlagEQ)
                return true
        }
-       // match: (CMPLconst (MOVLconst [x]) [y])
-       // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
+       // match: (CMPWconst (MOVLconst [x]) [y])
+       // cond: int16(x)<int16(y) && uint16(x)<uint16(y)
        // result: (FlagLT_ULT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpAMD64MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
+               if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
                        break
                }
                v.reset(OpAMD64FlagLT_ULT)
                return true
        }
-       // match: (CMPLconst (MOVLconst [x]) [y])
-       // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
+       // match: (CMPWconst (MOVLconst [x]) [y])
+       // cond: int16(x)<int16(y) && uint16(x)>uint16(y)
        // result: (FlagLT_UGT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpAMD64MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
+               if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
                        break
                }
                v.reset(OpAMD64FlagLT_UGT)
                return true
        }
-       // match: (CMPLconst (MOVLconst [x]) [y])
-       // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
+       // match: (CMPWconst (MOVLconst [x]) [y])
+       // cond: int16(x)>int16(y) && uint16(x)<uint16(y)
        // result: (FlagGT_ULT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpAMD64MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+               if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
                        break
                }
                v.reset(OpAMD64FlagGT_ULT)
                return true
        }
-       // match: (CMPLconst (MOVLconst [x]) [y])
-       // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
+       // match: (CMPWconst (MOVLconst [x]) [y])
+       // cond: int16(x)>int16(y) && uint16(x)>uint16(y)
        // result: (FlagGT_UGT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpAMD64MOVLconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+               if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
                        break
                }
                v.reset(OpAMD64FlagGT_UGT)
                return true
        }
-       // match: (CMPLconst (SHRLconst _ [c]) [n])
-       // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
+       // match: (CMPWconst (ANDLconst _ [m]) [n])
+       // cond: 0 <= int16(m) && int16(m) < int16(n)
        // result: (FlagLT_ULT)
        for {
+               n := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64SHRLconst {
+               if v_0.Op != OpAMD64ANDLconst {
+                       break
+               }
+               m := v_0.AuxInt
+               if !(0 <= int16(m) && int16(m) < int16(n)) {
+                       break
+               }
+               v.reset(OpAMD64FlagLT_ULT)
+               return true
+       }
+       // match: (CMPWconst (ANDL x y) [0])
+       // cond:
+       // result: (TESTW x y)
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ANDL {
+                       break
+               }
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               v.reset(OpAMD64TESTW)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+       // match: (CMPWconst (ANDLconst [c] x) [0])
+       // cond:
+       // result: (TESTWconst [int64(int16(c))] x)
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ANDLconst {
                        break
                }
                c := v_0.AuxInt
-               n := v.AuxInt
-               if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
+               x := v_0.Args[0]
+               v.reset(OpAMD64TESTWconst)
+               v.AuxInt = int64(int16(c))
+               v.AddArg(x)
+               return true
+       }
+       // match: (CMPWconst x [0])
+       // cond:
+       // result: (TESTW x x)
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               x := v.Args[0]
+               v.reset(OpAMD64TESTW)
+               v.AddArg(x)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (LEAQ [c] {s} (ADDQconst [d] x))
+       // cond: is32Bit(c+d)
+       // result: (LEAQ [c+d] {s} x)
+       for {
+               c := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(is32Bit(c + d)) {
+                       break
+               }
+               v.reset(OpAMD64LEAQ)
+               v.AuxInt = c + d
+               v.Aux = s
+               v.AddArg(x)
+               return true
+       }
+       // match: (LEAQ [c] {s} (ADDQ x y))
+       // cond: x.Op != OpSB && y.Op != OpSB
+       // result: (LEAQ1 [c] {s} x y)
+       for {
+               c := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
+                       break
+               }
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               if !(x.Op != OpSB && y.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64FlagLT_ULT)
+               v.reset(OpAMD64LEAQ1)
+               v.AuxInt = c
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPLconst (ANDLconst _ [m]) [n])
-       // cond: 0 <= int32(m) && int32(m) < int32(n)
-       // result: (FlagLT_ULT)
+       // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDLconst {
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
-               m := v_0.AuxInt
-               n := v.AuxInt
-               if !(0 <= int32(m) && int32(m) < int32(n)) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64FlagLT_ULT)
+               v.reset(OpAMD64LEAQ)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(x)
                return true
        }
-       // match: (CMPLconst (ANDL x y) [0])
-       // cond:
-       // result: (TESTL x y)
+       // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDL {
+               if v_0.Op != OpAMD64LEAQ1 {
                        break
                }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
                x := v_0.Args[0]
                y := v_0.Args[1]
-               if v.AuxInt != 0 {
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64TESTL)
+               v.reset(OpAMD64LEAQ1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-       // match: (CMPLconst (ANDLconst [c] x) [0])
-       // cond:
-       // result: (TESTLconst [c] x)
+       // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDLconst {
+               if v_0.Op != OpAMD64LEAQ2 {
                        break
                }
-               c := v_0.AuxInt
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
                x := v_0.Args[0]
-               if v.AuxInt != 0 {
-                       break
-               }
-               v.reset(OpAMD64TESTLconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               return true
-       }
-       // match: (CMPLconst x [0])
-       // cond:
-       // result: (TESTL x x)
-       for {
-               x := v.Args[0]
-               if v.AuxInt != 0 {
+               y := v_0.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64TESTL)
-               v.AddArg(x)
+               v.reset(OpAMD64LEAQ2)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (CMPQ x (MOVQconst [c]))
-       // cond: is32Bit(c)
-       // result: (CMPQconst x [c])
+       // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ4 {
                        break
                }
-               c := v_1.AuxInt
-               if !(is32Bit(c)) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64CMPQconst)
+               v.reset(OpAMD64LEAQ4)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(x)
-               v.AuxInt = c
+               v.AddArg(y)
                return true
        }
-       // match: (CMPQ (MOVQconst [c]) x)
-       // cond: is32Bit(c)
-       // result: (InvertFlags (CMPQconst x [c]))
+       // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
+               if v_0.Op != OpAMD64LEAQ8 {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               if !(is32Bit(c)) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64InvertFlags)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v0.AddArg(x)
-               v0.AuxInt = c
-               v.AddArg(v0)
+               v.reset(OpAMD64LEAQ8)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
        return false
 }
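
Every LEAQ-into-LEAQ fold above is gated the same way: the combined displacement must still fit the instruction's 32-bit field (is32Bit(off1+off2)), and at most one of the two addresses may carry a symbol, since the merged instruction has only one symbol slot. A simplified sketch of the symbol helpers under that assumption (the real ones operate on the ssa package's Aux values):

    package main

    import "fmt"

    // canMergeSym: two addresses can combine only if at most one carries
    // a symbol, because the merged LEAQ has a single symbol slot.
    func canMergeSym(x, y interface{}) bool { return x == nil || y == nil }

    // mergeSym keeps whichever symbol is present (both-non-nil is ruled
    // out by canMergeSym before this is reached).
    func mergeSym(x, y interface{}) interface{} {
        if x == nil {
            return y
        }
        return x
    }

    func main() {
        fmt.Println(canMergeSym("runtime.writeBarrier", nil)) // true
        fmt.Println(mergeSym(nil, "runtime.writeBarrier"))    // runtime.writeBarrier
    }
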
-func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (CMPQconst (MOVQconst [x]) [y])
-       // cond: x==y
-       // result: (FlagEQ)
+       // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
+       // cond: is32Bit(c+d)   && x.Op != OpSB
+       // result: (LEAQ1 [c+d] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(x == y) {
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               y := v.Args[1]
+               if !(is32Bit(c+d) && x.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64FlagEQ)
+               v.reset(OpAMD64LEAQ1)
+               v.AuxInt = c + d
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPQconst (MOVQconst [x]) [y])
-       // cond: x<y && uint64(x)<uint64(y)
-       // result: (FlagLT_ULT)
+       // match: (LEAQ1 [c] {s} x (ADDQconst [d] y))
+       // cond: is32Bit(c+d)   && y.Op != OpSB
+       // result: (LEAQ1 [c+d] {s} x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
+               c := v.AuxInt
+               s := v.Aux
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(x < y && uint64(x) < uint64(y)) {
+               d := v_1.AuxInt
+               y := v_1.Args[0]
+               if !(is32Bit(c+d) && y.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64FlagLT_ULT)
+               v.reset(OpAMD64LEAQ1)
+               v.AuxInt = c + d
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPQconst (MOVQconst [x]) [y])
-       // cond: x<y && uint64(x)>uint64(y)
-       // result: (FlagLT_UGT)
+       // match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
+       // cond:
+       // result: (LEAQ2 [c] {s} x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
+               c := v.AuxInt
+               s := v.Aux
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHLQconst {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(x < y && uint64(x) > uint64(y)) {
+               if v_1.AuxInt != 1 {
                        break
                }
-               v.reset(OpAMD64FlagLT_UGT)
+               y := v_1.Args[0]
+               v.reset(OpAMD64LEAQ2)
+               v.AuxInt = c
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPQconst (MOVQconst [x]) [y])
-       // cond: x>y && uint64(x)<uint64(y)
-       // result: (FlagGT_ULT)
+       // match: (LEAQ1 [c] {s} (SHLQconst [1] x) y)
+       // cond:
+       // result: (LEAQ2 [c] {s} y x)
        for {
+               c := v.AuxInt
+               s := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
+               if v_0.Op != OpAMD64SHLQconst {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(x > y && uint64(x) < uint64(y)) {
+               if v_0.AuxInt != 1 {
                        break
                }
-               v.reset(OpAMD64FlagGT_ULT)
+               x := v_0.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64LEAQ2)
+               v.AuxInt = c
+               v.Aux = s
+               v.AddArg(y)
+               v.AddArg(x)
                return true
        }
-       // match: (CMPQconst (MOVQconst [x]) [y])
-       // cond: x>y && uint64(x)>uint64(y)
-       // result: (FlagGT_UGT)
+       // match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
+       // cond:
+       // result: (LEAQ4 [c] {s} x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
+               c := v.AuxInt
+               s := v.Aux
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHLQconst {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(x > y && uint64(x) > uint64(y)) {
+               if v_1.AuxInt != 2 {
                        break
                }
-               v.reset(OpAMD64FlagGT_UGT)
+               y := v_1.Args[0]
+               v.reset(OpAMD64LEAQ4)
+               v.AuxInt = c
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPQconst (MOVBQZX _) [c])
-       // cond: 0xFF < c
-       // result: (FlagLT_ULT)
+       // match: (LEAQ1 [c] {s} (SHLQconst [2] x) y)
+       // cond:
+       // result: (LEAQ4 [c] {s} y x)
        for {
+               c := v.AuxInt
+               s := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVBQZX {
+               if v_0.Op != OpAMD64SHLQconst {
                        break
                }
-               c := v.AuxInt
-               if !(0xFF < c) {
+               if v_0.AuxInt != 2 {
                        break
                }
-               v.reset(OpAMD64FlagLT_ULT)
+               x := v_0.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64LEAQ4)
+               v.AuxInt = c
+               v.Aux = s
+               v.AddArg(y)
+               v.AddArg(x)
                return true
        }
-       // match: (CMPQconst (MOVWQZX _) [c])
-       // cond: 0xFFFF < c
-       // result: (FlagLT_ULT)
+       // match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
+       // cond:
+       // result: (LEAQ8 [c] {s} x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVWQZX {
+               c := v.AuxInt
+               s := v.Aux
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHLQconst {
                        break
                }
-               c := v.AuxInt
-               if !(0xFFFF < c) {
+               if v_1.AuxInt != 3 {
                        break
                }
-               v.reset(OpAMD64FlagLT_ULT)
+               y := v_1.Args[0]
+               v.reset(OpAMD64LEAQ8)
+               v.AuxInt = c
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPQconst (MOVLQZX _) [c])
-       // cond: 0xFFFFFFFF < c
-       // result: (FlagLT_ULT)
+       // match: (LEAQ1 [c] {s} (SHLQconst [3] x) y)
+       // cond:
+       // result: (LEAQ8 [c] {s} y x)
        for {
+               c := v.AuxInt
+               s := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLQZX {
+               if v_0.Op != OpAMD64SHLQconst {
                        break
                }
-               c := v.AuxInt
-               if !(0xFFFFFFFF < c) {
+               if v_0.AuxInt != 3 {
                        break
                }
-               v.reset(OpAMD64FlagLT_ULT)
+               x := v_0.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64LEAQ8)
+               v.AuxInt = c
+               v.Aux = s
+               v.AddArg(y)
+               v.AddArg(x)
                return true
        }
-       // match: (CMPQconst (SHRQconst _ [c]) [n])
-       // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
-       // result: (FlagLT_ULT)
+       // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+       // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64SHRQconst {
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
-               c := v_0.AuxInt
-               n := v.AuxInt
-               if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               y := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64FlagLT_ULT)
+               v.reset(OpAMD64LEAQ1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPQconst (ANDQconst _ [m]) [n])
-       // cond: 0 <= m && m < n
-       // result: (FlagLT_ULT)
+       // match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y))
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
+       // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDQconst {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64LEAQ {
                        break
                }
-               m := v_0.AuxInt
-               n := v.AuxInt
-               if !(0 <= m && m < n) {
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               y := v_1.Args[0]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64FlagLT_ULT)
+               v.reset(OpAMD64LEAQ1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPQconst (ANDQ x y) [0])
-       // cond:
-       // result: (TESTQ x y)
+       return false
+}
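
The SHLQconst rules in LEAQ1 are pure scale upgrades: x + (y<<k) is x + y*2^k, which the LEAQ2/LEAQ4/LEAQ8 forms encode directly in the scaled-index addressing mode for k = 1, 2, 3; the commuted variants swap the arguments so the shifted operand always lands in the index slot. The arithmetic being relied on:

    package main

    import "fmt"

    func main() {
        // LEAQ1 x (SHLQconst [k] y) computes x + (y<<k); the LEAQ2/4/8
        // forms compute x + y*scale, so the two agree for scale = 1<<k.
        x, y := uint64(100), uint64(7)
        fmt.Println(x+(y<<1) == x+2*y, x+(y<<2) == x+4*y, x+(y<<3) == x+8*y)
    }
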
+func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
+       // cond: is32Bit(c+d)   && x.Op != OpSB
+       // result: (LEAQ2 [c+d] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDQ {
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
+               d := v_0.AuxInt
                x := v_0.Args[0]
-               y := v_0.Args[1]
-               if v.AuxInt != 0 {
+               y := v.Args[1]
+               if !(is32Bit(c+d) && x.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64TESTQ)
+               v.reset(OpAMD64LEAQ2)
+               v.AuxInt = c + d
+               v.Aux = s
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-       // match: (CMPQconst (ANDQconst [c] x) [0])
-       // cond:
-       // result: (TESTQconst [c] x)
+       // match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
+       // cond: is32Bit(c+2*d) && y.Op != OpSB
+       // result: (LEAQ2 [c+2*d] {s} x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDQconst {
+               c := v.AuxInt
+               s := v.Aux
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
                        break
                }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               if v.AuxInt != 0 {
+               d := v_1.AuxInt
+               y := v_1.Args[0]
+               if !(is32Bit(c+2*d) && y.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64TESTQconst)
-               v.AuxInt = c
+               v.reset(OpAMD64LEAQ2)
+               v.AuxInt = c + 2*d
+               v.Aux = s
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPQconst x [0])
+       // match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
        // cond:
-       // result: (TESTQ x x)
+       // result: (LEAQ4 [c] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                x := v.Args[0]
-               if v.AuxInt != 0 {
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHLQconst {
                        break
                }
-               v.reset(OpAMD64TESTQ)
-               v.AddArg(x)
+               if v_1.AuxInt != 1 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpAMD64LEAQ4)
+               v.AuxInt = c
+               v.Aux = s
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (CMPW x (MOVLconst [c]))
+       // match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
        // cond:
-       // result: (CMPWconst x [int64(int16(c))])
+       // result: (LEAQ8 [c] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
+               if v_1.Op != OpAMD64SHLQconst {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpAMD64CMPWconst)
+               if v_1.AuxInt != 2 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpAMD64LEAQ8)
+               v.AuxInt = c
+               v.Aux = s
                v.AddArg(x)
-               v.AuxInt = int64(int16(c))
+               v.AddArg(y)
                return true
        }
-       // match: (CMPW (MOVLconst [c]) x)
-       // cond:
-       // result: (InvertFlags (CMPWconst x [int64(int16(c))]))
+       // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+       // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpAMD64InvertFlags)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v0.AddArg(x)
-               v0.AuxInt = int64(int16(c))
-               v.AddArg(v0)
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               y := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+                       break
+               }
+               v.reset(OpAMD64LEAQ2)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (CMPWconst (MOVLconst [x]) [y])
-       // cond: int16(x)==int16(y)
-       // result: (FlagEQ)
+       // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
+       // cond: is32Bit(c+d)   && x.Op != OpSB
+       // result: (LEAQ4 [c+d] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int16(x) == int16(y)) {
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               y := v.Args[1]
+               if !(is32Bit(c+d) && x.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64FlagEQ)
+               v.reset(OpAMD64LEAQ4)
+               v.AuxInt = c + d
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPWconst (MOVLconst [x]) [y])
-       // cond: int16(x)<int16(y) && uint16(x)<uint16(y)
-       // result: (FlagLT_ULT)
+       // match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
+       // cond: is32Bit(c+4*d) && y.Op != OpSB
+       // result: (LEAQ4 [c+4*d] {s} x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               c := v.AuxInt
+               s := v.Aux
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
+               d := v_1.AuxInt
+               y := v_1.Args[0]
+               if !(is32Bit(c+4*d) && y.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64FlagLT_ULT)
+               v.reset(OpAMD64LEAQ4)
+               v.AuxInt = c + 4*d
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPWconst (MOVLconst [x]) [y])
-       // cond: int16(x)<int16(y) && uint16(x)>uint16(y)
-       // result: (FlagLT_UGT)
+       // match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
+       // cond:
+       // result: (LEAQ8 [c] {s} x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               c := v.AuxInt
+               s := v.Aux
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHLQconst {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
+               if v_1.AuxInt != 1 {
                        break
                }
-               v.reset(OpAMD64FlagLT_UGT)
+               y := v_1.Args[0]
+               v.reset(OpAMD64LEAQ8)
+               v.AuxInt = c
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPWconst (MOVLconst [x]) [y])
-       // cond: int16(x)>int16(y) && uint16(x)<uint16(y)
-       // result: (FlagGT_ULT)
+       // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+       // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               x := v_0.Args[0]
+               y := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64FlagGT_ULT)
+               v.reset(OpAMD64LEAQ4)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPWconst (MOVLconst [x]) [y])
-       // cond: int16(x)>int16(y) && uint16(x)>uint16(y)
-       // result: (FlagGT_UGT)
+       return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
+       // cond: is32Bit(c+d)   && x.Op != OpSB
+       // result: (LEAQ8 [c+d] {s} x y)
        for {
+               c := v.AuxInt
+               s := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               x := v_0.AuxInt
-               y := v.AuxInt
-               if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               y := v.Args[1]
+               if !(is32Bit(c+d) && x.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64FlagGT_UGT)
+               v.reset(OpAMD64LEAQ8)
+               v.AuxInt = c + d
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPWconst (ANDLconst _ [m]) [n])
-       // cond: 0 <= int16(m) && int16(m) < int16(n)
-       // result: (FlagLT_ULT)
+       // match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
+       // cond: is32Bit(c+8*d) && y.Op != OpSB
+       // result: (LEAQ8 [c+8*d] {s} x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDLconst {
+               c := v.AuxInt
+               s := v.Aux
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
                        break
                }
-               m := v_0.AuxInt
-               n := v.AuxInt
-               if !(0 <= int16(m) && int16(m) < int16(n)) {
+               d := v_1.AuxInt
+               y := v_1.Args[0]
+               if !(is32Bit(c+8*d) && y.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64FlagLT_ULT)
+               v.reset(OpAMD64LEAQ8)
+               v.AuxInt = c + 8*d
+               v.Aux = s
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (CMPWconst (ANDL x y) [0])
-       // cond:
-       // result: (TESTW x y)
+       // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
+       // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDL {
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
                x := v_0.Args[0]
-               y := v_0.Args[1]
-               if v.AuxInt != 0 {
+               y := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64TESTW)
+               v.reset(OpAMD64LEAQ8)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-       // match: (CMPWconst (ANDLconst [c] x) [0])
-       // cond:
-       // result: (TESTWconst [int64(int16(c))] x)
+       return false
+}
+func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDLconst {
+               x := v.Args[0]
+               if x.Op != OpAMD64MOVBload {
                        break
                }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               if v.AuxInt != 0 {
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               mem := x.Args[1]
+               if !(x.Uses == 1 && clobber(x)) {
                        break
                }
-               v.reset(OpAMD64TESTWconst)
-               v.AuxInt = int64(int16(c))
-               v.AddArg(x)
+               b = x.Block
+               v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type)
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(mem)
                return true
        }
-       // match: (CMPWconst x [0])
-       // cond:
-       // result: (TESTW x x)
+       // match: (MOVBQSX (ANDLconst [c] x))
+       // cond: c & 0x80 == 0
+       // result: (ANDLconst [c & 0x7f] x)
        for {
-               x := v.Args[0]
-               if v.AuxInt != 0 {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ANDLconst {
                        break
                }
-               v.reset(OpAMD64TESTW)
-               v.AddArg(x)
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(c&0x80 == 0) {
+                       break
+               }
+               v.reset(OpAMD64ANDLconst)
+               v.AuxInt = c & 0x7f
                v.AddArg(x)
                return true
        }
        return false
 }
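
The load-folding rule for MOVBQSX only fires when the byte load feeds nothing but this extension (x.Uses == 1); the rewrite then rebuilds it as a sign-extending load in the load's own block (the @x.Block result) and clobbers the original so dead-code elimination removes it. The second rule notes that masking with a constant whose bit 7 is clear already leaves the sign bit zero, so the sign extension is the identity and the mask can drop to its low seven bits. A self-contained mock of the condition-plus-side-effect idiom, with stand-in types for the ssa package's:

    package main

    import "fmt"

    type Op int

    const (
        OpInvalid Op = iota
        OpAMD64MOVBload
    )

    // Value is a stand-in for the ssa package's *Value, carrying only
    // the fields this sketch needs.
    type Value struct {
        Op   Op
        Uses int
    }

    func (v *Value) reset(op Op) { v.Op = op }

    // clobber invalidates a matched value and returns true so it can be
    // chained into a rule condition such as "x.Uses == 1 && clobber(x)".
    func clobber(v *Value) bool {
        v.reset(OpInvalid)
        return true
    }

    func main() {
        x := &Value{Op: OpAMD64MOVBload, Uses: 1}
        if x.Uses == 1 && clobber(x) {
            fmt.Println(x.Op == OpInvalid) // true: the old load is dead
        }
    }
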
-func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ClosureCall [argwid] entry closure mem)
-       // cond:
-       // result: (CALLclosure [argwid] entry closure mem)
+       // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               closure := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64CALLclosure)
-               v.AuxInt = argwid
-               v.AddArg(entry)
-               v.AddArg(closure)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVBQSXload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
                v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com16 x)
-       // cond:
-       // result: (NOTL x)
-       for {
-               x := v.Args[0]
-               v.reset(OpAMD64NOTL)
-               v.AddArg(x)
-               return true
-       }
-}
-func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Com32 x)
-       // cond:
-       // result: (NOTL x)
+       // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
        for {
                x := v.Args[0]
-               v.reset(OpAMD64NOTL)
-               v.AddArg(x)
+               if x.Op != OpAMD64MOVBload {
+                       break
+               }
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               mem := x.Args[1]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               b = x.Block
+               v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type)
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com64 x)
-       // cond:
-       // result: (NOTQ x)
+       // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
        for {
                x := v.Args[0]
-               v.reset(OpAMD64NOTQ)
-               v.AddArg(x)
+               if x.Op != OpAMD64MOVBloadidx1 {
+                       break
+               }
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               idx := x.Args[1]
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               b = x.Block
+               v0 := b.NewValue0(v.Line, OpAMD64MOVBloadidx1, v.Type)
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(idx)
+               v0.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com8  x)
+       // match: (MOVBQZX (ANDLconst [c] x))
        // cond:
-       // result: (NOTL x)
+       // result: (ANDLconst [c & 0xff] x)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64NOTL)
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ANDLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpAMD64ANDLconst)
+               v.AuxInt = c & 0xff
                v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Const16  [val])
-       // cond:
-       // result: (MOVLconst [val])
+       // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               val := v.AuxInt
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = val
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVBstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const32  [val])
-       // cond:
-       // result: (MOVLconst [val])
+       // match: (MOVBload  [off1] {sym} (ADDQconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVBload  [off1+off2] {sym} ptr mem)
        for {
-               val := v.AuxInt
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const32F [val])
-       // cond:
-       // result: (MOVSSconst [val])
+       // match: (MOVBload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               val := v.AuxInt
-               v.reset(OpAMD64MOVSSconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const64  [val])
-       // cond:
-       // result: (MOVQconst [val])
+       // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               val := v.AuxInt
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ1 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVBloadidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const64F [val])
-       // cond:
-       // result: (MOVSDconst [val])
+       // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVBloadidx1 [off] {sym} ptr idx mem)
        for {
-               val := v.AuxInt
-               v.reset(OpAMD64MOVSDconst)
-               v.AuxInt = val
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(OpAMD64MOVBloadidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Const8   [val])
+       // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
        // cond:
-       // result: (MOVLconst [val])
+       // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               val := v.AuxInt
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = val
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVBloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ConstBool [b])
+       // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
        // cond:
-       // result: (MOVLconst [b])
+       // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               b := v.AuxInt
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = b
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVBloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ConstNil)
+       // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
        // cond:
-       // result: (MOVQconst [0])
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = 0
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVBQSX {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Convert <t> x mem)
+       // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
        // cond:
-       // result: (MOVQconvert <t> x mem)
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpAMD64MOVQconvert)
-               v.Type = t
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVBQZX {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
                v.AddArg(x)
                v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCtz16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Ctz16 <t> x)
-       // cond:
-       // result: (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])
+       // match: (MOVBstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVBstore  [off1+off2] {sym} ptr val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               v.reset(OpAMD64CMOVWEQconst)
-               v0 := b.NewValue0(v.Line, OpAMD64BSFW, t)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v1.AddArg(x)
-               v1.AuxInt = 0
-               v.AddArg(v1)
-               v.AuxInt = 16
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Ctz32 <t> x)
-       // cond:
-       // result: (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
+       // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
+       // cond: validOff(off)
+       // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               v.reset(OpAMD64CMOVLEQconst)
-               v0 := b.NewValue0(v.Line, OpAMD64BSFL, t)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v1.AddArg(x)
-               v1.AuxInt = 0
-               v.AddArg(v1)
-               v.AuxInt = 32
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               if !(validOff(off)) {
+                       break
+               }
+               v.reset(OpAMD64MOVBstoreconst)
+               v.AuxInt = makeValAndOff(int64(int8(c)), off)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Ctz64 <t> x)
-       // cond:
-       // result: (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
+       // match: (MOVBstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               v.reset(OpAMD64CMOVQEQconst)
-               v0 := b.NewValue0(v.Line, OpAMD64BSFQ, t)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v1.AddArg(x)
-               v1.AuxInt = 0
-               v.AddArg(v1)
-               v.AuxInt = 64
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Fto32 x)
-       // cond:
-       // result: (CVTTSS2SL x)
+       // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64CVTTSS2SL)
-               v.AddArg(x)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ1 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVBstoreidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Fto64 x)
-       // cond:
-       // result: (CVTTSS2SQ x)
+       // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64CVTTSS2SQ)
-               v.AddArg(x)
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(OpAMD64MOVBstoreidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Fto64F x)
-       // cond:
-       // result: (CVTSS2SD x)
+       // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVWstore [i-1] {s} p w mem)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64CVTSS2SD)
-               v.AddArg(x)
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHRQconst {
+                       break
+               }
+               if v_1.AuxInt != 8 {
+                       break
+               }
+               w := v_1.Args[0]
+               x := v.Args[2]
+               if x.Op != OpAMD64MOVBstore {
+                       break
+               }
+               if x.AuxInt != i-1 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if w != x.Args[1] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVWstore)
+               v.AuxInt = i - 1
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(w)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32to32F x)
-       // cond:
-       // result: (CVTSL2SS x)
+       // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVWstore [i-1] {s} p w0 mem)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64CVTSL2SS)
-               v.AddArg(x)
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHRQconst {
+                       break
+               }
+               j := v_1.AuxInt
+               w := v_1.Args[0]
+               x := v.Args[2]
+               if x.Op != OpAMD64MOVBstore {
+                       break
+               }
+               if x.AuxInt != i-1 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               w0 := x.Args[1]
+               if w0.Op != OpAMD64SHRQconst {
+                       break
+               }
+               if w0.AuxInt != j-8 {
+                       break
+               }
+               if w != w0.Args[0] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVWstore)
+               v.AuxInt = i - 1
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(w0)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32to64F x)
-       // cond:
-       // result: (CVTSL2SD x)
+       // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+       // cond: ValAndOff(sc).canAdd(off)
+       // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64CVTSL2SD)
-               v.AddArg(x)
+               sc := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               off := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(ValAndOff(sc).canAdd(off)) {
+                       break
+               }
+               v.reset(OpAMD64MOVBstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
+               v.Aux = s
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto32 x)
-       // cond:
-       // result: (CVTTSD2SL x)
+       // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+       // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64CVTTSD2SL)
-               v.AddArg(x)
+               sc := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
+                       break
+               }
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+                       break
+               }
+               v.reset(OpAMD64MOVBstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto32F x)
-       // cond:
-       // result: (CVTSD2SS x)
+       // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
+       // cond: canMergeSym(sym1, sym2)
+       // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64CVTSD2SS)
-               v.AddArg(x)
+               x := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ1 {
+                       break
+               }
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVBstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto64 x)
+       // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
        // cond:
-       // result: (CVTTSD2SQ x)
+       // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64CVTTSD2SQ)
-               v.AddArg(x)
+               x := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               v.reset(OpAMD64MOVBstoreconstidx1)
+               v.AuxInt = x
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64to32F x)
-       // cond:
-       // result: (CVTSQ2SS x)
+       // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64CVTSQ2SS)
-               v.AddArg(x)
+               c := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               x := v.Args[1]
+               if x.Op != OpAMD64MOVBstoreconst {
+                       break
+               }
+               a := x.AuxInt
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               mem := x.Args[1]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVWstoreconst)
+               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt64to64F x)
+       // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
        // cond:
-       // result: (CVTSQ2SD x)
+       // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               v.reset(OpAMD64CVTSQ2SD)
-               v.AddArg(x)
+               x := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVBstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (DeferCall [argwid] mem)
+       // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
        // cond:
-       // result: (CALLdefer [argwid] mem)
+       // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               argwid := v.AuxInt
-               mem := v.Args[0]
-               v.reset(OpAMD64CALLdefer)
-               v.AuxInt = argwid
+               x := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVBstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div16  x y)
-       // cond:
-       // result: (Select0 (DIVW  x y))
+       // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               i := v.Args[1]
+               x := v.Args[2]
+               if x.Op != OpAMD64MOVBstoreconstidx1 {
+                       break
+               }
+               a := x.AuxInt
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if i != x.Args[1] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVWstoreconstidx1)
+               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(i)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div16u x y)
+       // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
        // cond:
-       // result: (Select0 (DIVWU x y))
+       // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVBstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32  x y)
+       // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
        // cond:
-       // result: (Select0 (DIVL  x y))
+       // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVBstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32F x y)
-       // cond:
-       // result: (DIVSS x y)
+       // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64DIVSS)
-               v.AddArg(x)
-               v.AddArg(y)
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpAMD64SHRQconst {
+                       break
+               }
+               if v_2.AuxInt != 8 {
+                       break
+               }
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != OpAMD64MOVBstoreidx1 {
+                       break
+               }
+               if x.AuxInt != i-1 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if idx != x.Args[1] {
+                       break
+               }
+               if w != x.Args[2] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVWstoreidx1)
+               v.AuxInt = i - 1
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(idx)
+               v.AddArg(w)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32u x y)
-       // cond:
-       // result: (Select0 (DIVLU x y))
+       // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpAMD64SHRQconst {
+                       break
+               }
+               j := v_2.AuxInt
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != OpAMD64MOVBstoreidx1 {
+                       break
+               }
+               if x.AuxInt != i-1 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if idx != x.Args[1] {
+                       break
+               }
+               w0 := x.Args[2]
+               if w0.Op != OpAMD64SHRQconst {
+                       break
+               }
+               if w0.AuxInt != j-8 {
+                       break
+               }
+               if w != w0.Args[0] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVWstoreidx1)
+               v.AuxInt = i - 1
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(idx)
+               v.AddArg(w0)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div64  x y)
-       // cond:
-       // result: (Select0 (DIVQ  x y))
+       // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-               v0.AddArg(x)
-               v0.AddArg(y)
+               if x.Op != OpAMD64MOVLload {
+                       break
+               }
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               mem := x.Args[1]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               b = x.Block
+               v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type)
+               v.reset(OpCopy)
                v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div64F x y)
-       // cond:
-       // result: (DIVSD x y)
+       // match: (MOVLQSX (ANDLconst [c] x))
+       // cond: c & 0x80000000 == 0
+       // result: (ANDLconst [c & 0x7fffffff] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64DIVSD)
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ANDLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(c&0x80000000 == 0) {
+                       break
+               }
+               v.reset(OpAMD64ANDLconst)
+               v.AuxInt = c & 0x7fffffff
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div64u x y)
-       // cond:
-       // result: (Select0 (DIVQU x y))
+       // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLQSXload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div8   x y)
-       // cond:
-       // result: (Select0 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
+       // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
-               v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-               v2.AddArg(y)
-               v0.AddArg(v2)
+               if x.Op != OpAMD64MOVLload {
+                       break
+               }
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               mem := x.Args[1]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               b = x.Block
+               v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type)
+               v.reset(OpCopy)
                v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div8u  x y)
-       // cond:
-       // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+       // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect0)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
-               v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-               v2.AddArg(y)
-               v0.AddArg(v2)
+               if x.Op != OpAMD64MOVLloadidx1 {
+                       break
+               }
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               idx := x.Args[1]
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               b = x.Block
+               v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
+               v.reset(OpCopy)
                v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(idx)
+               v0.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq16  x y)
-       // cond:
-       // result: (SETEQ (CMPW x y))
+       // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETEQ)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               if x.Op != OpAMD64MOVLloadidx4 {
+                       break
+               }
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               idx := x.Args[1]
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               b = x.Block
+               v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx4, v.Type)
+               v.reset(OpCopy)
                v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(idx)
+               v0.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq32  x y)
+       // match: (MOVLQZX (ANDLconst [c] x))
        // cond:
-       // result: (SETEQ (CMPL x y))
+       // result: (ANDLconst [c] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETEQ)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ANDLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpAMD64ANDLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq32F x y)
-       // cond:
-       // result: (SETEQF (UCOMISS x y))
+       // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETEQF)
-               v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq64  x y)
-       // cond:
-       // result: (SETEQ (CMPQ x y))
+       // match: (MOVLload  [off1] {sym} (ADDQconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVLload  [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETEQ)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq64F x y)
-       // cond:
-       // result: (SETEQF (UCOMISD x y))
+       // match: (MOVLload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETEQF)
-               v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq8   x y)
-       // cond:
-       // result: (SETEQ (CMPB x y))
+       // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETEQ)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ1 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLloadidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ4 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLloadidx4)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVLloadidx1 [off] {sym} ptr idx mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(OpAMD64MOVLloadidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (EqB   x y)
+       // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
        // cond:
-       // result: (SETEQ (CMPB x y))
+       // result: (MOVLloadidx4 [c] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETEQ)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHLQconst {
+                       break
+               }
+               if v_1.AuxInt != 2 {
+                       break
+               }
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVLloadidx4)
+               v.AuxInt = c
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (EqPtr x y)
+       // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
        // cond:
-       // result: (SETEQ (CMPQ x y))
+       // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETEQ)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVLloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq16  x y)
+       // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
        // cond:
-       // result: (SETGE (CMPW x y))
+       // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETGE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVLloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq16U x y)
+       // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
        // cond:
-       // result: (SETAE (CMPW x y))
+       // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETAE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVLloadidx4)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq32  x y)
+       // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
        // cond:
-       // result: (SETGE (CMPL x y))
+       // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETGE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVLloadidx4)
+               v.AuxInt = c + 4*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq32F x y)
+       // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
        // cond:
-       // result: (SETGEF (UCOMISS x y))
+       // result: (MOVLstore [off] {sym} ptr x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETGEF)
-               v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLQSX {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVLstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq32U x y)
+       // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
        // cond:
-       // result: (SETAE (CMPL x y))
+       // result: (MOVLstore [off] {sym} ptr x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETAE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLQZX {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVLstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq64  x y)
-       // cond:
-       // result: (SETGE (CMPQ x y))
+       // match: (MOVLstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVLstore  [off1+off2] {sym} ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETGE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq64F x y)
-       // cond:
-       // result: (SETGEF (UCOMISD x y))
+       // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
+       // cond: validOff(off)
+       // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETGEF)
-               v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               if !(validOff(off)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreconst)
+               v.AuxInt = makeValAndOff(int64(int32(c)), off)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq64U x y)
-       // cond:
-       // result: (SETAE (CMPQ x y))
+       // match: (MOVLstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETAE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq8   x y)
-       // cond:
-       // result: (SETGE (CMPB x y))
+       // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETGE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ1 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq8U  x y)
-       // cond:
-       // result: (SETAE (CMPB x y))
+       // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETAE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ4 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreidx4)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (GetClosurePtr)
-       // cond:
-       // result: (LoweredGetClosurePtr)
+       // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
        for {
-               v.reset(OpAMD64LoweredGetClosurePtr)
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (GetG mem)
-       // cond:
-       // result: (LoweredGetG mem)
+       // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVQstore [i-4] {s} p w mem)
        for {
-               mem := v.Args[0]
-               v.reset(OpAMD64LoweredGetG)
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHRQconst {
+                       break
+               }
+               if v_1.AuxInt != 32 {
+                       break
+               }
+               w := v_1.Args[0]
+               x := v.Args[2]
+               if x.Op != OpAMD64MOVLstore {
+                       break
+               }
+               if x.AuxInt != i-4 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if w != x.Args[1] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVQstore)
+               v.AuxInt = i - 4
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(w)
                v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (GoCall [argwid] mem)
-       // cond:
-       // result: (CALLgo [argwid] mem)
+       // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVQstore [i-4] {s} p w0 mem)
        for {
-               argwid := v.AuxInt
-               mem := v.Args[0]
-               v.reset(OpAMD64CALLgo)
-               v.AuxInt = argwid
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHRQconst {
+                       break
+               }
+               j := v_1.AuxInt
+               w := v_1.Args[0]
+               x := v.Args[2]
+               if x.Op != OpAMD64MOVLstore {
+                       break
+               }
+               if x.AuxInt != i-4 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               w0 := x.Args[1]
+               if w0.Op != OpAMD64SHRQconst {
+                       break
+               }
+               if w0.AuxInt != j-32 {
+                       break
+               }
+               if w != w0.Args[0] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVQstore)
+               v.AuxInt = i - 4
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(w0)
                v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater16  x y)
-       // cond:
-       // result: (SETG (CMPW x y))
+       // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+       // cond: ValAndOff(sc).canAdd(off)
+       // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETG)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater16U x y)
-       // cond:
-       // result: (SETA (CMPW x y))
+               sc := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               off := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(ValAndOff(sc).canAdd(off)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
+               v.Aux = s
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+       // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETA)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               sc := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
+                       break
+               }
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater32  x y)
-       // cond:
-       // result: (SETG (CMPL x y))
+       // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
+       // cond: canMergeSym(sym1, sym2)
+       // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETG)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ1 {
+                       break
+               }
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater32F x y)
-       // cond:
-       // result: (SETGF (UCOMISS x y))
+       // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
+       // cond: canMergeSym(sym1, sym2)
+       // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETGF)
-               v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ4 {
+                       break
+               }
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreconstidx4)
+               v.AuxInt = ValAndOff(x).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater32U x y)
+       // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
        // cond:
-       // result: (SETA (CMPL x y))
+       // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETA)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               v.reset(OpAMD64MOVLstoreconstidx1)
+               v.AuxInt = x
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater64  x y)
-       // cond:
-       // result: (SETG (CMPQ x y))
+       // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETG)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               c := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               x := v.Args[1]
+               if x.Op != OpAMD64MOVLstoreconst {
+                       break
+               }
+               a := x.AuxInt
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               mem := x.Args[1]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVQstore)
+               v.AuxInt = ValAndOff(a).Off()
+               v.Aux = s
+               v.AddArg(p)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+               v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
                v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater64F x y)
+       // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
        // cond:
-       // result: (SETGF (UCOMISD x y))
+       // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETGF)
-               v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHLQconst {
+                       break
+               }
+               if v_1.AuxInt != 2 {
+                       break
+               }
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVLstoreconstidx4)
+               v.AuxInt = c
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater64U x y)
+       // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
        // cond:
-       // result: (SETA (CMPQ x y))
+       // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETA)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVLstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater8   x y)
+       // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
        // cond:
-       // result: (SETG (CMPB x y))
+       // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETG)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVLstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater8U  x y)
-       // cond:
-       // result: (SETA (CMPB x y))
+       // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETA)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               c := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               i := v.Args[1]
+               x := v.Args[2]
+               if x.Op != OpAMD64MOVLstoreconstidx1 {
+                       break
+               }
+               a := x.AuxInt
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if i != x.Args[1] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVQstoreidx1)
+               v.AuxInt = ValAndOff(a).Off()
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(i)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+               v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
                v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul16  x y)
+       // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
        // cond:
-       // result: (HMULW  x y)
+       // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64HMULW)
-               v.AddArg(x)
-               v.AddArg(y)
+               x := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVLstoreconstidx4)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul16u x y)
+       // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
        // cond:
-       // result: (HMULWU x y)
+       // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64HMULWU)
-               v.AddArg(x)
-               v.AddArg(y)
+               x := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVLstoreconstidx4)
+               v.AuxInt = ValAndOff(x).add(4 * c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul32  x y)
-       // cond:
-       // result: (HMULL  x y)
+       // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64HMULL)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               i := v.Args[1]
+               x := v.Args[2]
+               if x.Op != OpAMD64MOVLstoreconstidx4 {
+                       break
+               }
+               a := x.AuxInt
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if i != x.Args[1] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVQstoreidx1)
+               v.AuxInt = ValAndOff(a).Off()
+               v.Aux = s
+               v.AddArg(p)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type)
+               v0.AuxInt = 2
+               v0.AddArg(i)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+               v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
+               v.AddArg(v1)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul32u x y)
+       // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
        // cond:
-       // result: (HMULLU x y)
+       // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64HMULLU)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHLQconst {
+                       break
+               }
+               if v_1.AuxInt != 2 {
+                       break
+               }
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVLstoreidx4)
+               v.AuxInt = c
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul64  x y)
+       // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
        // cond:
-       // result: (HMULQ  x y)
+       // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64HMULQ)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVLstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul64u x y)
+       // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
        // cond:
-       // result: (HMULQU x y)
+       // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64HMULQU)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVLstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul8   x y)
-       // cond:
-       // result: (HMULB  x y)
+       // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64HMULB)
-               v.AddArg(x)
-               v.AddArg(y)
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpAMD64SHRQconst {
+                       break
+               }
+               if v_2.AuxInt != 32 {
+                       break
+               }
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != OpAMD64MOVLstoreidx1 {
+                       break
+               }
+               if x.AuxInt != i-4 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if idx != x.Args[1] {
+                       break
+               }
+               if w != x.Args[2] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVQstoreidx1)
+               v.AuxInt = i - 4
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(idx)
+               v.AddArg(w)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul8u  x y)
-       // cond:
-       // result: (HMULBU x y)
+       // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64HMULBU)
-               v.AddArg(x)
-               v.AddArg(y)
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpAMD64SHRQconst {
+                       break
+               }
+               j := v_2.AuxInt
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != OpAMD64MOVLstoreidx1 {
+                       break
+               }
+               if x.AuxInt != i-4 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if idx != x.Args[1] {
+                       break
+               }
+               w0 := x.Args[2]
+               if w0.Op != OpAMD64SHRQconst {
+                       break
+               }
+               if w0.AuxInt != j-32 {
+                       break
+               }
+               if w != w0.Args[0] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVQstoreidx1)
+               v.AuxInt = i - 4
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(idx)
+               v.AddArg(w0)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (InterCall [argwid] entry mem)
+       // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
        // cond:
-       // result: (CALLinter [argwid] entry mem)
+       // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
        for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpAMD64CALLinter)
-               v.AuxInt = argwid
-               v.AddArg(entry)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVLstoreidx4)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (IsInBounds idx len)
+       // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
        // cond:
-       // result: (SETB (CMPQ idx len))
+       // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
        for {
-               idx := v.Args[0]
-               len := v.Args[1]
-               v.reset(OpAMD64SETB)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(idx)
-               v0.AddArg(len)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVLstoreidx4)
+               v.AuxInt = c + 4*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (IsNonNil p)
-       // cond:
-       // result: (SETNE (TESTQ p p))
+       // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
        for {
+               i := v.AuxInt
+               s := v.Aux
                p := v.Args[0]
-               v.reset(OpAMD64SETNE)
-               v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
-               v0.AddArg(p)
-               v0.AddArg(p)
-               v.AddArg(v0)
-               return true
-       }
-}
-func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (IsSliceInBounds idx len)
-       // cond:
-       // result: (SETBE (CMPQ idx len))
-       for {
-               idx := v.Args[0]
-               len := v.Args[1]
-               v.reset(OpAMD64SETBE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpAMD64SHRQconst {
+                       break
+               }
+               if v_2.AuxInt != 32 {
+                       break
+               }
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != OpAMD64MOVLstoreidx4 {
+                       break
+               }
+               if x.AuxInt != i-4 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if idx != x.Args[1] {
+                       break
+               }
+               if w != x.Args[2] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVQstoreidx1)
+               v.AuxInt = i - 4
+               v.Aux = s
+               v.AddArg(p)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
+               v0.AuxInt = 2
                v0.AddArg(idx)
-               v0.AddArg(len)
                v.AddArg(v0)
+               v.AddArg(w)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (LEAQ [c] {s} (ADDQconst [d] x))
-       // cond: is32Bit(c+d)
-       // result: (LEAQ [c+d] {s} x)
+       // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
        for {
-               c := v.AuxInt
+               i := v.AuxInt
                s := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpAMD64SHRQconst {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               if !(is32Bit(c + d)) {
+               j := v_2.AuxInt
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != OpAMD64MOVLstoreidx4 {
                        break
                }
-               v.reset(OpAMD64LEAQ)
-               v.AuxInt = c + d
-               v.Aux = s
-               v.AddArg(x)
-               return true
-       }
-       // match: (LEAQ [c] {s} (ADDQ x y))
-       // cond: x.Op != OpSB && y.Op != OpSB
-       // result: (LEAQ1 [c] {s} x y)
-       for {
-               c := v.AuxInt
-               s := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               if x.AuxInt != i-4 {
                        break
                }
-               x := v_0.Args[0]
-               y := v_0.Args[1]
-               if !(x.Op != OpSB && y.Op != OpSB) {
+               if x.Aux != s {
                        break
                }
-               v.reset(OpAMD64LEAQ1)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-       // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if p != x.Args[0] {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               x := v_0.Args[0]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if idx != x.Args[1] {
                        break
                }
-               v.reset(OpAMD64LEAQ)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
+               w0 := x.Args[2]
+               if w0.Op != OpAMD64SHRQconst {
+                       break
+               }
+               if w0.AuxInt != j-32 {
+                       break
+               }
+               if w != w0.Args[0] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVQstoreidx1)
+               v.AuxInt = i - 4
+               v.Aux = s
+               v.AddArg(p)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
+               v0.AuxInt = 2
+               v0.AddArg(idx)
+               v.AddArg(v0)
+               v.AddArg(w0)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       return false
+}
+func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVOload  [off1] {sym} (ADDQconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVOload  [off1+off2] {sym} ptr mem)
        for {
                off1 := v.AuxInt
-               sym1 := v.Aux
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
                off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v_0.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
                        break
                }
-               v.reset(OpAMD64LEAQ1)
+               v.reset(OpAMD64MOVOload)
                v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
+       // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
        // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ2 {
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
                off2 := v_0.AuxInt
                sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v_0.Args[1]
+               base := v_0.Args[0]
+               mem := v.Args[1]
                if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64LEAQ2)
+               v.reset(OpAMD64MOVOload)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(base)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       return false
+}
+func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVOstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVOstore  [off1+off2] {sym} ptr val mem)
        for {
                off1 := v.AuxInt
-               sym1 := v.Aux
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ4 {
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
                off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v_0.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
                        break
                }
-               v.reset(OpAMD64LEAQ4)
+               v.reset(OpAMD64MOVOstore)
                v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
+       // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
        // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ8 {
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
                off2 := v_0.AuxInt
                sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v_0.Args[1]
+               base := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
                if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64LEAQ8)
+               v.reset(OpAMD64MOVOstore)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(base)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
-       // cond: is32Bit(c+d)   && x.Op != OpSB
-       // result: (LEAQ1 [c+d] {s} x y)
+       // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               c := v.AuxInt
-               s := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVQstore {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(c+d) && x.Op != OpSB) {
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
                        break
                }
-               v.reset(OpAMD64LEAQ1)
-               v.AuxInt = c + d
-               v.Aux = s
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-       // match: (LEAQ1 [c] {s} x (ADDQconst [d] y))
-       // cond: is32Bit(c+d)   && y.Op != OpSB
-       // result: (LEAQ1 [c+d] {s} x y)
+       // match: (MOVQload  [off1] {sym} (ADDQconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVQload  [off1+off2] {sym} ptr mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               d := v_1.AuxInt
-               y := v_1.Args[0]
-               if !(is32Bit(c+d) && y.Op != OpSB) {
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
                        break
                }
-               v.reset(OpAMD64LEAQ1)
-               v.AuxInt = c + d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64MOVQload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
-       // cond:
-       // result: (LEAQ2 [c] {s} x y)
+       // match: (MOVQload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
-               if v_1.AuxInt != 1 {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(OpAMD64LEAQ2)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64MOVQload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ1 [c] {s} (SHLQconst [1] x) y)
-       // cond:
-       // result: (LEAQ2 [c] {s} y x)
+       // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64SHLQconst {
+               if v_0.Op != OpAMD64LEAQ1 {
                        break
                }
-               if v_0.AuxInt != 1 {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               x := v_0.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64LEAQ2)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(y)
-               v.AddArg(x)
+               v.reset(OpAMD64MOVQloadidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
-       // cond:
-       // result: (LEAQ4 [c] {s} x y)
+       // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ8 {
                        break
                }
-               if v_1.AuxInt != 2 {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(OpAMD64LEAQ4)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64MOVQloadidx8)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ1 [c] {s} (SHLQconst [2] x) y)
-       // cond:
-       // result: (LEAQ4 [c] {s} y x)
+       // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVQloadidx1 [off] {sym} ptr idx mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
+               off := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64SHLQconst {
+               if v_0.Op != OpAMD64ADDQ {
                        break
                }
-               if v_0.AuxInt != 2 {
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(ptr.Op != OpSB) {
                        break
                }
-               x := v_0.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64LEAQ4)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(y)
-               v.AddArg(x)
+               v.reset(OpAMD64MOVQloadidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
+       return false
+}
+func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
        // cond:
-       // result: (LEAQ8 [c] {s} x y)
+       // result: (MOVQloadidx8 [c] {sym} ptr idx mem)
        for {
                c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
+               sym := v.Aux
+               ptr := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpAMD64SHLQconst {
                        break
@@ -4463,182 +5992,165 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
                if v_1.AuxInt != 3 {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(OpAMD64LEAQ8)
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVQloadidx8)
                v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ1 [c] {s} (SHLQconst [3] x) y)
+       // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
        // cond:
-       // result: (LEAQ8 [c] {s} y x)
+       // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
        for {
                c := v.AuxInt
-               s := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if v_0.AuxInt != 3 {
-                       break
-               }
-               x := v_0.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64LEAQ8)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(y)
-               v.AddArg(x)
-               return true
-       }
-       // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-       // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               v.reset(OpAMD64LEAQ1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVQloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y))
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
-       // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
+       // cond:
+       // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               x := v.Args[0]
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64LEAQ {
-                       break
-               }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               y := v_1.Args[0]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
+               if v_1.Op != OpAMD64ADDQconst {
                        break
                }
-               v.reset(OpAMD64LEAQ1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVQloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
-       // cond: is32Bit(c+d)   && x.Op != OpSB
-       // result: (LEAQ2 [c+d] {s} x y)
+       // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
+       // cond:
+       // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
        for {
                c := v.AuxInt
-               s := v.Aux
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(c+d) && x.Op != OpSB) {
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               v.reset(OpAMD64LEAQ2)
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVQloadidx8)
                v.AuxInt = c + d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
-       // cond: is32Bit(c+2*d) && y.Op != OpSB
-       // result: (LEAQ2 [c+2*d] {s} x y)
+       // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
+       // cond:
+       // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
        for {
                c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
+               sym := v.Aux
+               ptr := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpAMD64ADDQconst {
                        break
                }
                d := v_1.AuxInt
-               y := v_1.Args[0]
-               if !(is32Bit(c+2*d) && y.Op != OpSB) {
-                       break
-               }
-               v.reset(OpAMD64LEAQ2)
-               v.AuxInt = c + 2*d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVQloadidx8)
+               v.AuxInt = c + 8*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
-       // cond:
-       // result: (LEAQ4 [c] {s} x y)
+       return false
+}
+func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVQstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVQstore  [off1+off2] {sym} ptr val mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               if v_1.AuxInt != 1 {
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(OpAMD64LEAQ4)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64MOVQstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
-       // cond:
-       // result: (LEAQ8 [c] {s} x y)
+       // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
+       // cond: validValAndOff(c,off)
+       // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               if v_1.AuxInt != 2 {
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               if !(validValAndOff(c, off)) {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(OpAMD64LEAQ8)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64MOVQstoreconst)
+               v.AuxInt = makeValAndOff(c, off)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-       // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // match: (MOVQstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
@@ -4648,1088 +6160,1432 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
                }
                off2 := v_0.AuxInt
                sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+               base := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64LEAQ2)
+               v.reset(OpAMD64MOVQstore)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(base)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
-       // cond: is32Bit(c+d)   && x.Op != OpSB
-       // result: (LEAQ4 [c+d] {s} x y)
+       // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(c+d) && x.Op != OpSB) {
-                       break
-               }
-               v.reset(OpAMD64LEAQ4)
-               v.AuxInt = c + d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-       // match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
-       // cond: is32Bit(c+4*d) && y.Op != OpSB
-       // result: (LEAQ4 [c+4*d] {s} x y)
-       for {
-               c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64LEAQ1 {
                        break
                }
-               d := v_1.AuxInt
-               y := v_1.Args[0]
-               if !(is32Bit(c+4*d) && y.Op != OpSB) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64LEAQ4)
-               v.AuxInt = c + 4*d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64MOVQstoreidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
-       // cond:
-       // result: (LEAQ8 [c] {s} x y)
+       // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ8 {
                        break
                }
-               if v_1.AuxInt != 1 {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(OpAMD64LEAQ8)
-               v.AuxInt = c
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64MOVQstoreidx8)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-       // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               off := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if v_0.Op != OpAMD64ADDQ {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(ptr.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64LEAQ4)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64MOVQstoreidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
-       // cond: is32Bit(c+d)   && x.Op != OpSB
-       // result: (LEAQ8 [c+d] {s} x y)
+       // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+       // cond: ValAndOff(sc).canAdd(off)
+       // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
        for {
-               c := v.AuxInt
+               sc := v.AuxInt
                s := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(c+d) && x.Op != OpSB) {
+               off := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(ValAndOff(sc).canAdd(off)) {
                        break
                }
-               v.reset(OpAMD64LEAQ8)
-               v.AuxInt = c + d
+               v.reset(OpAMD64MOVQstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
                v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
-       // cond: is32Bit(c+8*d) && y.Op != OpSB
-       // result: (LEAQ8 [c+8*d] {s} x y)
+       // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+       // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               sc := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
-               d := v_1.AuxInt
-               y := v_1.Args[0]
-               if !(is32Bit(c+8*d) && y.Op != OpSB) {
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
                        break
                }
-               v.reset(OpAMD64LEAQ8)
-               v.AuxInt = c + 8*d
-               v.Aux = s
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64MOVQstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-       // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+       // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
+       // cond: canMergeSym(sym1, sym2)
+       // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               off1 := v.AuxInt
+               x := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if v_0.Op != OpAMD64LEAQ1 {
                        break
                }
-               off2 := v_0.AuxInt
+               off := v_0.AuxInt
                sym2 := v_0.Aux
-               x := v_0.Args[0]
-               y := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64LEAQ8)
-               v.AuxInt = off1 + off2
+               v.reset(OpAMD64MOVQstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(off)
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq16  x y)
-       // cond:
-       // result: (SETLE (CMPW x y))
+       // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
+       // cond: canMergeSym(sym1, sym2)
+       // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETLE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ8 {
+                       break
+               }
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVQstoreconstidx8)
+               v.AuxInt = ValAndOff(x).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq16U x y)
+       // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
        // cond:
-       // result: (SETBE (CMPW x y))
+       // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETBE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               v.reset(OpAMD64MOVQstoreconstidx1)
+               v.AuxInt = x
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq32  x y)
+       // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
        // cond:
-       // result: (SETLE (CMPL x y))
+       // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETLE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHLQconst {
+                       break
+               }
+               if v_1.AuxInt != 3 {
+                       break
+               }
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVQstoreconstidx8)
+               v.AuxInt = c
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq32F x y)
+       // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
        // cond:
-       // result: (SETGEF (UCOMISS y x))
+       // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETGEF)
-               v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               x := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVQstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq32U x y)
+       // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
        // cond:
-       // result: (SETBE (CMPL x y))
+       // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETBE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVQstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq64  x y)
+       // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
        // cond:
-       // result: (SETLE (CMPQ x y))
+       // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETLE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVQstoreconstidx8)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq64F x y)
+       // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
        // cond:
-       // result: (SETGEF (UCOMISD y x))
+       // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETGEF)
-               v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               x := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVQstoreconstidx8)
+               v.AuxInt = ValAndOff(x).add(8 * c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq64U x y)
+       // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
        // cond:
-       // result: (SETBE (CMPQ x y))
+       // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETBE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHLQconst {
+                       break
+               }
+               if v_1.AuxInt != 3 {
+                       break
+               }
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVQstoreidx8)
+               v.AuxInt = c
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq8   x y)
+       // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
        // cond:
-       // result: (SETLE (CMPB x y))
+       // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETLE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVQstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq8U  x y)
+       // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
        // cond:
-       // result: (SETBE (CMPB x y))
+       // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETBE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVQstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less16  x y)
+       // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
        // cond:
-       // result: (SETL (CMPW x y))
+       // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETL)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVQstoreidx8)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less16U x y)
+       // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
        // cond:
-       // result: (SETB (CMPW x y))
+       // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETB)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVQstoreidx8)
+               v.AuxInt = c + 8*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less32  x y)
-       // cond:
-       // result: (SETL (CMPL x y))
+       // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVSDload [off1+off2] {sym} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSDload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETL)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSDload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less32F x y)
-       // cond:
-       // result: (SETGF (UCOMISS y x))
+       // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETGF)
-               v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ1 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSDloadidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less32U x y)
-       // cond:
-       // result: (SETB (CMPL x y))
+       // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETB)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ8 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSDloadidx8)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less64  x y)
-       // cond:
-       // result: (SETL (CMPQ x y))
+       // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETL)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(OpAMD64MOVSDloadidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less64F x y)
+       // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
        // cond:
-       // result: (SETGF (UCOMISD y x))
+       // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETGF)
-               v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVSDloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less64U x y)
+       // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
        // cond:
-       // result: (SETB (CMPQ x y))
+       // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETB)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVSDloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less8   x y)
+       // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
        // cond:
-       // result: (SETL (CMPB x y))
+       // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETL)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVSDloadidx8)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less8U  x y)
+       // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
        // cond:
-       // result: (SETB (CMPB x y))
+       // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETB)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVSDloadidx8)
+               v.AuxInt = c + 8*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
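
Note: the two MOVSDloadidx8 rules above differ only in which operand absorbs the ADDQconst, yet one adds d to the displacement and the other adds 8*d. That follows directly from the effective address ptr + 8*idx + c: a constant folded out of the base shifts the displacement by d, while one folded out of the scaled index shifts it by 8*d. A minimal standalone sketch of the identity (names are illustrative, not the compiler's):

package main

import "fmt"

// addrIdx8 models the effective address computed by a MOVSDloadidx8:
// base + 8*index + displacement.
func addrIdx8(ptr, idx, c int64) int64 { return ptr + 8*idx + c }

func main() {
	ptr, idx, c, d := int64(0x1000), int64(3), int64(16), int64(2)
	// Folding (ADDQconst [d] ptr): the displacement grows by d.
	fmt.Println(addrIdx8(ptr+d, idx, c) == addrIdx8(ptr, idx, c+d)) // true
	// Folding (ADDQconst [d] idx): the displacement grows by 8*d.
	fmt.Println(addrIdx8(ptr, idx+d, c) == addrIdx8(ptr, idx, c+8*d)) // true
}
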
-func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Load <t> ptr mem)
-       // cond: (is64BitInt(t) || isPtr(t))
-       // result: (MOVQload ptr mem)
+       // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is64BitInt(t) || isPtr(t)) {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               v.reset(OpAMD64MOVQload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Load <t> ptr mem)
-       // cond: is32BitInt(t)
-       // result: (MOVLload ptr mem)
-       for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitInt(t)) {
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
                        break
                }
-               v.reset(OpAMD64MOVLload)
+               v.reset(OpAMD64MOVSDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is16BitInt(t)
-       // result: (MOVWload ptr mem)
+       // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is16BitInt(t)) {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
-               v.reset(OpAMD64MOVWload)
-               v.AddArg(ptr)
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (t.IsBoolean() || is8BitInt(t))
-       // result: (MOVBload ptr mem)
+       // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(t.IsBoolean() || is8BitInt(t)) {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ1 {
                        break
                }
-               v.reset(OpAMD64MOVBload)
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSDstoreidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is32BitFloat(t)
-       // result: (MOVSSload ptr mem)
+       // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitFloat(t)) {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ8 {
                        break
                }
-               v.reset(OpAMD64MOVSSload)
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSDstoreidx8)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is64BitFloat(t)
-       // result: (MOVSDload ptr mem)
+       // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is64BitFloat(t)) {
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
                        break
                }
-               v.reset(OpAMD64MOVSDload)
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(OpAMD64MOVSDstoreidx1)
+               v.AuxInt = off
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
        return false
 }
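
Note: the MOVSDstore rules above fold an ADDQconst or LEAQ* into the store's addressing mode, guarded by is32Bit (the combined offset must fit an x86-64 signed 32-bit displacement) and canMergeSym (at most one of the two symbols may be non-nil, since two relocations cannot be combined). A standalone sketch consistent with how the generated code uses those helpers (the real definitions live in rewrite.go):

package main

import "fmt"

// is32Bit reports whether n survives a round trip through int32,
// i.e. fits in a signed 32-bit displacement.
func is32Bit(n int64) bool { return n == int64(int32(n)) }

// canMergeSym permits folding only when at most one symbol is present.
func canMergeSym(x, y interface{}) bool { return x == nil || y == nil }

func main() {
	fmt.Println(is32Bit(1<<31 - 1))                                    // true: max displacement
	fmt.Println(is32Bit(1 << 31))                                      // false: overflows int32
	fmt.Println(canMergeSym("sym1", nil), canMergeSym("sym1", "sym2")) // true false
}
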
-func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lrot16 <t> x [c])
-       // cond:
-       // result: (ROLWconst <t> [c&15] x)
-       for {
-               t := v.Type
-               x := v.Args[0]
-               c := v.AuxInt
-               v.reset(OpAMD64ROLWconst)
-               v.Type = t
-               v.AuxInt = c & 15
-               v.AddArg(x)
-               return true
-       }
-}
-func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lrot32 <t> x [c])
-       // cond:
-       // result: (ROLLconst <t> [c&31] x)
-       for {
-               t := v.Type
-               x := v.Args[0]
-               c := v.AuxInt
-               v.reset(OpAMD64ROLLconst)
-               v.Type = t
-               v.AuxInt = c & 31
-               v.AddArg(x)
-               return true
-       }
-}
-func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lrot64 <t> x [c])
+       // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
        // cond:
-       // result: (ROLQconst <t> [c&63] x)
+       // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
                c := v.AuxInt
-               v.reset(OpAMD64ROLQconst)
-               v.Type = t
-               v.AuxInt = c & 63
-               v.AddArg(x)
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVSDstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lrot8  <t> x [c])
+       // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
        // cond:
-       // result: (ROLBconst <t> [c&7] x)
+       // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
                c := v.AuxInt
-               v.reset(OpAMD64ROLBconst)
-               v.Type = t
-               v.AuxInt = c & 7
-               v.AddArg(x)
-               return true
-       }
-}
-func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh16x16 <t> x y)
-       // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-       for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVSDstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x32 <t> x y)
+       // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
        // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+       // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVSDstoreidx8)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh16x64 <t> x y)
+       // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
        // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+       // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVSDstoreidx8)
+               v.AuxInt = c + 8*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x8  <t> x y)
-       // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+       // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVSSload [off1+off2] {sym} ptr mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSSload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x16 <t> x y)
-       // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+       // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSSload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x32 <t> x y)
-       // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+       // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ1 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSSloadidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ4 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSSloadidx4)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(OpAMD64MOVSSloadidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
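
Note: the last MOVSSload rule declines to fold (ADDQ ptr idx) into an indexed load when ptr is SB. SB is the pseudo-register addressing the binary's static data; it is only legal as the base of an x86 address (via a relocation), never as an index, and the two address operands of an idx1 op are presumably interchangeable, so SB must be kept out of them entirely. A toy predicate capturing the guard (the types here are stand-ins, not the compiler's):

package main

import "fmt"

// Op stands in for the SSA op of an address operand; OpSB models the
// pseudo-register holding the static base of the binary.
type Op int

const (
	OpSB Op = iota
	OpAddr
)

// canBeIndexOperand reports whether an operand may land in either slot of
// a scaled-index address: SB may serve as a base only, never as an index.
func canBeIndexOperand(op Op) bool { return op != OpSB }

func main() {
	fmt.Println(canBeIndexOperand(OpSB), canBeIndexOperand(OpAddr)) // false true
}
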
-func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh32x64 <t> x y)
+       // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
        // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+       // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVSSloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x8  <t> x y)
+       // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
        // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+       // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVSSloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh64x16 <t> x y)
+       // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
        // cond:
-       // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+       // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDQ)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 64
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVSSloadidx4)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh64x32 <t> x y)
+       // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
        // cond:
-       // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+       // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDQ)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 64
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVSSloadidx4)
+               v.AuxInt = c + 4*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh64x64 <t> x y)
-       // cond:
-       // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+       // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDQ)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 64
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSSstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSSstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ1 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSSstoreidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh64x8  <t> x y)
-       // cond:
-       // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+       // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDQ)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 64
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ4 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVSSstoreidx4)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
+                       break
+               }
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(OpAMD64MOVSSstoreidx1)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x16 <t> x y)
+       // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
        // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+       // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVSSstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh8x32 <t> x y)
+       // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
        // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+       // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVSSstoreidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x64 <t> x y)
+       // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
        // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+       // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVSSstoreidx4)
+               v.AuxInt = c + d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh8x8  <t> x y)
+       // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
        // cond:
-       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+       // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
+                       break
+               }
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVSSstoreidx4)
+               v.AuxInt = c + 4*d
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
+       // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
        // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+       // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
        for {
                x := v.Args[0]
-               if x.Op != OpAMD64MOVBload {
+               if x.Op != OpAMD64MOVWload {
                        break
                }
                off := x.AuxInt
@@ -5740,7 +7596,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
                        break
                }
                b = x.Block
-               v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type)
                v.reset(OpCopy)
                v.AddArg(v0)
                v0.AuxInt = off
@@ -5749,9 +7605,9 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
                v0.AddArg(mem)
                return true
        }
-       // match: (MOVBQSX (ANDLconst [c] x))
-       // cond: c & 0x80 == 0
-       // result: (ANDLconst [c & 0x7f] x)
+       // match: (MOVWQSX (ANDLconst [c] x))
+       // cond: c & 0x8000 == 0
+       // result: (ANDLconst [c & 0x7fff] x)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpAMD64ANDLconst {
@@ -5759,22 +7615,22 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
                }
                c := v_0.AuxInt
                x := v_0.Args[0]
-               if !(c&0x80 == 0) {
+               if !(c&0x8000 == 0) {
                        break
                }
                v.reset(OpAMD64ANDLconst)
-               v.AuxInt = c & 0x7f
+               v.AuxInt = c & 0x7fff
                v.AddArg(x)
                return true
        }
        return false
 }
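
Note: the MOVWQSX/ANDLconst rule above is sound only under its condition c & 0x8000 == 0. If bit 15 of the mask is clear, bit 15 of x&c is always zero, so sign-extending the low 16 bits is the same as zero-extending them, and the AND alone (with the high bits masked off) suffices. A small arithmetic check of that equivalence (plain Go, not compiler code):

package main

import "fmt"

func main() {
	// movwqsx models MOVWQSX: sign-extend the low 16 bits of its operand.
	movwqsx := func(v int32) int64 { return int64(int16(v)) }

	const c = 0x7abc // bit 15 clear, so the rule's condition c&0x8000 == 0 holds
	x := int32(0x12345678)

	before := movwqsx(x & c)         // original: mask, then sign-extend
	after := int64(x & (c & 0x7fff)) // rewritten: the mask alone
	fmt.Println(before == after)     // true: the sign bit can never be set
}
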
-func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+       // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
        // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+       // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
@@ -5789,7 +7645,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool {
                if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64MOVBQSXload)
+               v.reset(OpAMD64MOVWQSXload)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
                v.AddArg(base)
@@ -5798,15 +7654,15 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool {
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
+       // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
        // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+       // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
        for {
                x := v.Args[0]
-               if x.Op != OpAMD64MOVBload {
+               if x.Op != OpAMD64MOVWload {
                        break
                }
                off := x.AuxInt
@@ -5817,7 +7673,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
                        break
                }
                b = x.Block
-               v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type)
                v.reset(OpCopy)
                v.AddArg(v0)
                v0.AuxInt = off
@@ -5826,12 +7682,12 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
                v0.AddArg(mem)
                return true
        }
-       // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
+       // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
        // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
+       // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
        for {
                x := v.Args[0]
-               if x.Op != OpAMD64MOVBloadidx1 {
+               if x.Op != OpAMD64MOVWloadidx1 {
                        break
                }
                off := x.AuxInt
@@ -5843,7 +7699,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
                        break
                }
                b = x.Block
-               v0 := b.NewValue0(v.Line, OpAMD64MOVBloadidx1, v.Type)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
                v.reset(OpCopy)
                v.AddArg(v0)
                v0.AuxInt = off
@@ -5853,245 +7709,79 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
                v0.AddArg(mem)
                return true
        }
-       // match: (MOVBQZX (ANDLconst [c] x))
-       // cond:
-       // result: (ANDLconst [c & 0xff] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpAMD64ANDLconst)
-               v.AuxInt = c & 0xff
-               v.AddArg(x)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-       // result: x
+       // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
+       // cond: x.Uses == 1 && clobber(x)
+       // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVBstore {
+               x := v.Args[0]
+               if x.Op != OpAMD64MOVWloadidx2 {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+               off := x.AuxInt
+               sym := x.Aux
+               ptr := x.Args[0]
+               idx := x.Args[1]
+               mem := x.Args[2]
+               if !(x.Uses == 1 && clobber(x)) {
                        break
                }
+               b = x.Block
+               v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx2, v.Type)
                v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (MOVBload  [off1] {sym} (ADDQconst [off2] ptr) mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVBload  [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1 + off2)) {
-                       break
-               }
-               v.reset(OpAMD64MOVBload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVBload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpAMD64MOVBload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpAMD64MOVBloadidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVBloadidx1 [off] {sym} ptr idx mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
-                       break
-               }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(ptr.Op != OpSB) {
-                       break
-               }
-               v.reset(OpAMD64MOVBloadidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.AddArg(v0)
+               v0.AuxInt = off
+               v0.Aux = sym
+               v0.AddArg(ptr)
+               v0.AddArg(idx)
+               v0.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
+       // match: (MOVWQZX (ANDLconst [c] x))
        // cond:
-       // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (ANDLconst [c & 0xffff] x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
-                       break
-               }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVBloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
-       // cond:
-       // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64ANDLconst {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVBloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpAMD64ANDLconst)
+               v.AuxInt = c & 0xffff
+               v.AddArg(x)
                return true
        }
        return false
 }
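
Note: the MOVWQZX load rules above fuse the zero-extension into the load itself, emitting the replacement load in the load's own block (@x.Block) at the extension's type. This relies on a sub-register load zero-filling the rest of the destination register, which makes the separate MOVWQZX redundant; the condition x.Uses == 1 && clobber(x) restricts the rewrite to the load's sole use and, as defined in rewrite.go, clobber resets the old value to OpInvalid (returning true so it can sit in a condition) so dead-code elimination removes it. A sketch of the zero-fill fact the rewrite depends on:

package main

import "fmt"

func main() {
	// Loading a 16-bit value into a wider variable leaves the upper bits
	// zero, exactly what MOVWQZX would otherwise have to produce.
	buf := []byte{0xcd, 0xab}
	w := uint16(buf[0]) | uint16(buf[1])<<8 // the loaded half-word
	fmt.Printf("%#x\n", uint64(w))          // 0xabcd: upper 48 bits already zero
}
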
-func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
-       // cond:
-       // result: (MOVBstore [off] {sym} ptr x mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVBQSX {
-                       break
-               }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVBstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(x)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
-       // cond:
-       // result: (MOVBstore [off] {sym} ptr x mem)
+       // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
                off := v.AuxInt
                sym := v.Aux
                ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVBQZX {
+               if v_1.Op != OpAMD64MOVWstore {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVBstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(mem)
                return true
        }
-       // match: (MOVBstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
+       // match: (MOVWload  [off1] {sym} (ADDQconst [off2] ptr) mem)
        // cond: is32Bit(off1+off2)
-       // result: (MOVBstore  [off1+off2] {sym} ptr val mem)
+       // result: (MOVWload  [off1+off2] {sym} ptr mem)
        for {
                off1 := v.AuxInt
                sym := v.Aux
@@ -6101,99 +7791,96 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
                }
                off2 := v_0.AuxInt
                ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
+               mem := v.Args[1]
                if !(is32Bit(off1 + off2)) {
                        break
                }
-               v.reset(OpAMD64MOVBstore)
+               v.reset(OpAMD64MOVWload)
                v.AuxInt = off1 + off2
                v.Aux = sym
                v.AddArg(ptr)
-               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
-       // cond: validOff(off)
-       // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
+       // match: (MOVWload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
-               c := v_1.AuxInt
-               mem := v.Args[2]
-               if !(validOff(off)) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64MOVBstoreconst)
-               v.AuxInt = makeValAndOff(int64(int8(c)), off)
-               v.Aux = sym
-               v.AddArg(ptr)
+               v.reset(OpAMD64MOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+       // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
        // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+       // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if v_0.Op != OpAMD64LEAQ1 {
                        break
                }
                off2 := v_0.AuxInt
                sym2 := v_0.Aux
-               base := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
                if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64MOVBstore)
+               v.reset(OpAMD64MOVWloadidx1)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(val)
+               v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
+       // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
        // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
                off1 := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               if v_0.Op != OpAMD64LEAQ2 {
                        break
                }
                off2 := v_0.AuxInt
                sym2 := v_0.Aux
                ptr := v_0.Args[0]
                idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
+               mem := v.Args[1]
                if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64MOVBstoreidx1)
+               v.reset(OpAMD64MOVWloadidx2)
                v.AuxInt = off1 + off2
                v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
                v.AddArg(idx)
-               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
+       // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
        // cond: ptr.Op != OpSB
-       // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
+       // result: (MOVWloadidx1 [off] {sym} ptr idx mem)
        for {
                off := v.AuxInt
                sym := v.Aux
@@ -6203,368 +7890,336 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
                }
                ptr := v_0.Args[0]
                idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
+               mem := v.Args[1]
                if !(ptr.Op != OpSB) {
                        break
                }
-               v.reset(OpAMD64MOVBstoreidx1)
+               v.reset(OpAMD64MOVWloadidx1)
                v.AuxInt = off
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVWstore [i-1] {s} p w mem)
-       for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHRQconst {
-                       break
-               }
-               if v_1.AuxInt != 8 {
-                       break
-               }
-               w := v_1.Args[0]
-               x := v.Args[2]
-               if x.Op != OpAMD64MOVBstore {
-                       break
-               }
-               if x.AuxInt != i-1 {
-                       break
-               }
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               if w != x.Args[1] {
-                       break
-               }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
-                       break
-               }
-               v.reset(OpAMD64MOVWstore)
-               v.AuxInt = i - 1
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(w)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVWstore [i-1] {s} p w0 mem)
+       return false
+}
+func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
+       // cond:
+       // result: (MOVWloadidx2 [c] {sym} ptr idx mem)
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHRQconst {
-                       break
-               }
-               j := v_1.AuxInt
-               w := v_1.Args[0]
-               x := v.Args[2]
-               if x.Op != OpAMD64MOVBstore {
-                       break
-               }
-               if x.AuxInt != i-1 {
-                       break
-               }
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               w0 := x.Args[1]
-               if w0.Op != OpAMD64SHRQconst {
-                       break
-               }
-               if w0.AuxInt != j-8 {
-                       break
-               }
-               if w != w0.Args[0] {
+               if v_1.Op != OpAMD64SHLQconst {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
+               if v_1.AuxInt != 1 {
                        break
                }
-               v.reset(OpAMD64MOVWstore)
-               v.AuxInt = i - 1
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(w0)
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVWloadidx2)
+               v.AuxInt = c
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
-       // cond: ValAndOff(sc).canAdd(off)
-       // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+       // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
+       // cond:
+       // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               sc := v.AuxInt
-               s := v.Aux
+               c := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               off := v_0.AuxInt
+               d := v_0.AuxInt
                ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(ValAndOff(sc).canAdd(off)) {
-                       break
-               }
-               v.reset(OpAMD64MOVBstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = s
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVWloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-       // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+       // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
+       // cond:
+       // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
        for {
-               sc := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
-                       break
-               }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+               c := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
                        break
                }
-               v.reset(OpAMD64MOVBstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = mergeSym(sym1, sym2)
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVWloadidx1)
+               v.AuxInt = c + d
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
-       // cond: canMergeSym(sym1, sym2)
-       // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+       return false
+}
+func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
+       // cond:
+       // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
        for {
-               x := v.AuxInt
-               sym1 := v.Aux
+               c := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
+               d := v_0.AuxInt
                ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpAMD64MOVBstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(off)
-               v.Aux = mergeSym(sym1, sym2)
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVWloadidx2)
+               v.AuxInt = c + d
+               v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
+       // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
        // cond:
-       // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
+       // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
        for {
-               x := v.AuxInt
+               c := v.AuxInt
                sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               v.reset(OpAMD64MOVBstoreconstidx1)
-               v.AuxInt = x
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVWloadidx2)
+               v.AuxInt = c + 2*d
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
+       return false
+}
+func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
+       // cond:
+       // result: (MOVWstore [off] {sym} ptr x mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               x := v.Args[1]
-               if x.Op != OpAMD64MOVBstoreconst {
-                       break
-               }
-               a := x.AuxInt
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVWQSX {
                        break
                }
-               mem := x.Args[1]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVWstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
+       // cond:
+       // result: (MOVWstore [off] {sym} ptr x mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVWQZX {
                        break
                }
-               v.reset(OpAMD64MOVWstoreconst)
-               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
-               v.Aux = s
-               v.AddArg(p)
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVWstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
-       // cond:
-       // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // match: (MOVWstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
+       // cond: is32Bit(off1+off2)
+       // result: (MOVWstore  [off1+off2] {sym} ptr val mem)
        for {
-               x := v.AuxInt
+               off1 := v.AuxInt
                sym := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               c := v_0.AuxInt
+               off2 := v_0.AuxInt
                ptr := v_0.Args[0]
-               idx := v.Args[1]
+               val := v.Args[1]
                mem := v.Args[2]
-               v.reset(OpAMD64MOVBstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
+               if !(is32Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpAMD64MOVWstore)
+               v.AuxInt = off1 + off2
                v.Aux = sym
                v.AddArg(ptr)
-               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
-       // cond:
-       // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
+       // cond: validOff(off)
+       // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
        for {
-               x := v.AuxInt
+               off := v.AuxInt
                sym := v.Aux
                ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
                c := v_1.AuxInt
-               idx := v_1.Args[0]
                mem := v.Args[2]
-               v.reset(OpAMD64MOVBstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
+               if !(validOff(off)) {
+                       break
+               }
+               v.reset(OpAMD64MOVWstoreconst)
+               v.AuxInt = makeValAndOff(int64(int16(c)), off)
                v.Aux = sym
                v.AddArg(ptr)
-               v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
+       // match: (MOVWstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
        for {
-               c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               i := v.Args[1]
-               x := v.Args[2]
-               if x.Op != OpAMD64MOVBstoreconstidx1 {
-                       break
-               }
-               a := x.AuxInt
-               if x.Aux != s {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
-               if p != x.Args[0] {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               base := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               if i != x.Args[1] {
+               v.reset(OpAMD64MOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(base)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ1 {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64MOVWstoreconstidx1)
-               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(i)
+               v.reset(OpAMD64MOVWstoreidx1)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-       // cond:
-       // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+       // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
+       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+       // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
        for {
-               c := v.AuxInt
-               sym := v.Aux
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64LEAQ2 {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVBstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
+               v.reset(OpAMD64MOVWstoreidx2)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-       // cond:
-       // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
+       // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
+       // cond: ptr.Op != OpSB
+       // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
        for {
-               c := v.AuxInt
+               off := v.AuxInt
                sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVBstoreidx1)
-               v.AuxInt = c + d
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(ptr.Op != OpSB) {
+                       break
+               }
+               v.reset(OpAMD64MOVWstoreidx1)
+               v.AuxInt = off
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
@@ -6572,27 +8227,26 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool {
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
+       // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
        // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
+       // result: (MOVLstore [i-2] {s} p w mem)
        for {
                i := v.AuxInt
                s := v.Aux
                p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpAMD64SHRQconst {
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHRQconst {
                        break
                }
-               if v_2.AuxInt != 8 {
+               if v_1.AuxInt != 16 {
                        break
                }
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != OpAMD64MOVBstoreidx1 {
+               w := v_1.Args[0]
+               x := v.Args[2]
+               if x.Op != OpAMD64MOVWstore {
                        break
                }
-               if x.AuxInt != i-1 {
+               if x.AuxInt != i-2 {
                        break
                }
                if x.Aux != s {
@@ -6601,44 +8255,39 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool {
                if p != x.Args[0] {
                        break
                }
-               if idx != x.Args[1] {
-                       break
-               }
-               if w != x.Args[2] {
+               if w != x.Args[1] {
                        break
                }
-               mem := x.Args[3]
+               mem := x.Args[2]
                if !(x.Uses == 1 && clobber(x)) {
                        break
                }
-               v.reset(OpAMD64MOVWstoreidx1)
-               v.AuxInt = i - 1
+               v.reset(OpAMD64MOVLstore)
+               v.AuxInt = i - 2
                v.Aux = s
                v.AddArg(p)
-               v.AddArg(idx)
                v.AddArg(w)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
+       // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
        // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
+       // result: (MOVLstore [i-2] {s} p w0 mem)
        for {
                i := v.AuxInt
                s := v.Aux
                p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpAMD64SHRQconst {
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64SHRQconst {
                        break
                }
-               j := v_2.AuxInt
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != OpAMD64MOVBstoreidx1 {
+               j := v_1.AuxInt
+               w := v_1.Args[0]
+               x := v.Args[2]
+               if x.Op != OpAMD64MOVWstore {
                        break
                }
-               if x.AuxInt != i-1 {
+               if x.AuxInt != i-2 {
                        break
                }
                if x.Aux != s {
@@ -6647,367 +8296,383 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool {
                if p != x.Args[0] {
                        break
                }
-               if idx != x.Args[1] {
-                       break
-               }
-               w0 := x.Args[2]
+               w0 := x.Args[1]
                if w0.Op != OpAMD64SHRQconst {
                        break
                }
-               if w0.AuxInt != j-8 {
+               if w0.AuxInt != j-16 {
                        break
                }
                if w != w0.Args[0] {
                        break
                }
-               mem := x.Args[3]
+               mem := x.Args[2]
                if !(x.Uses == 1 && clobber(x)) {
                        break
                }
-               v.reset(OpAMD64MOVWstoreidx1)
-               v.AuxInt = i - 1
+               v.reset(OpAMD64MOVLstore)
+               v.AuxInt = i - 2
                v.Aux = s
                v.AddArg(p)
-               v.AddArg(idx)
                v.AddArg(w0)
                v.AddArg(mem)
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+       // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+       // cond: ValAndOff(sc).canAdd(off)
+       // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
        for {
-               x := v.Args[0]
-               if x.Op != OpAMD64MOVLload {
+               sc := v.AuxInt
+               s := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               mem := x.Args[1]
-               if !(x.Uses == 1 && clobber(x)) {
+               off := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(ValAndOff(sc).canAdd(off)) {
                        break
                }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type)
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
-               v0.AddArg(mem)
+               v.reset(OpAMD64MOVWstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
+               v.Aux = s
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (MOVLQSX (ANDLconst [c] x))
-       // cond: c & 0x80000000 == 0
-       // result: (ANDLconst [c & 0x7fffffff] x)
+       // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
+       // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
        for {
+               sc := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDLconst {
+               if v_0.Op != OpAMD64LEAQ {
                        break
                }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               if !(c&0x80000000 == 0) {
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
                        break
                }
-               v.reset(OpAMD64ANDLconst)
-               v.AuxInt = c & 0x7fffffff
-               v.AddArg(x)
+               v.reset(OpAMD64MOVWstoreconst)
+               v.AuxInt = ValAndOff(sc).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+       // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
+       // cond: canMergeSym(sym1, sym2)
+       // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               off1 := v.AuxInt
+               x := v.AuxInt
                sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if v_0.Op != OpAMD64LEAQ1 {
                        break
                }
-               off2 := v_0.AuxInt
+               off := v_0.AuxInt
                sym2 := v_0.Aux
-               base := v_0.Args[0]
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
                mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if !(canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpAMD64MOVLQSXload)
-               v.AuxInt = off1 + off2
+               v.reset(OpAMD64MOVWstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(off)
                v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
+               v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+       // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
+       // cond: canMergeSym(sym1, sym2)
+       // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
        for {
-               x := v.Args[0]
-               if x.Op != OpAMD64MOVLload {
+               x := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64LEAQ2 {
                        break
                }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               mem := x.Args[1]
-               if !(x.Uses == 1 && clobber(x)) {
+               off := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
                        break
                }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type)
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
-               v0.AddArg(mem)
+               v.reset(OpAMD64MOVWstoreconstidx2)
+               v.AuxInt = ValAndOff(x).add(off)
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
+       // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
+       // cond:
+       // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
        for {
-               x := v.Args[0]
-               if x.Op != OpAMD64MOVLloadidx1 {
-                       break
-               }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               idx := x.Args[1]
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
+               x := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ADDQ {
                        break
                }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
-               v0.AddArg(idx)
-               v0.AddArg(mem)
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               v.reset(OpAMD64MOVWstoreconstidx1)
+               v.AuxInt = x
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
+       // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
        for {
-               x := v.Args[0]
-               if x.Op != OpAMD64MOVLloadidx4 {
+               c := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               x := v.Args[1]
+               if x.Op != OpAMD64MOVWstoreconst {
                        break
                }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               idx := x.Args[1]
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
+               a := x.AuxInt
+               if x.Aux != s {
                        break
                }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx4, v.Type)
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
-               v0.AddArg(idx)
-               v0.AddArg(mem)
-               return true
-       }
-       // match: (MOVLQZX (ANDLconst [c] x))
-       // cond:
-       // result: (ANDLconst [c] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDLconst {
+               if p != x.Args[0] {
                        break
                }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpAMD64ANDLconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               mem := x.Args[1]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreconst)
+               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(mem)
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-       // result: x
+       // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
+       // cond:
+       // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
        for {
-               off := v.AuxInt
+               c := v.AuxInt
                sym := v.Aux
                ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLstore {
+               if v_1.Op != OpAMD64SHLQconst {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+               if v_1.AuxInt != 1 {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVWstoreconstidx2)
+               v.AuxInt = c
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (MOVLload  [off1] {sym} (ADDQconst [off2] ptr) mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVLload  [off1+off2] {sym} ptr mem)
+       // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
+       // cond:
+       // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               off1 := v.AuxInt
+               x := v.AuxInt
                sym := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               off2 := v_0.AuxInt
+               c := v_0.AuxInt
                ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1 + off2)) {
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVWstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
+       // cond:
+       // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       for {
+               x := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
                        break
                }
-               v.reset(OpAMD64MOVLload)
-               v.AuxInt = off1 + off2
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVWstoreconstidx1)
+               v.AuxInt = ValAndOff(x).add(c)
                v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
+       // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               c := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               i := v.Args[1]
+               x := v.Args[2]
+               if x.Op != OpAMD64MOVWstoreconstidx1 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               a := x.AuxInt
+               if x.Aux != s {
                        break
                }
-               v.reset(OpAMD64MOVLload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
+               if p != x.Args[0] {
+                       break
+               }
+               if i != x.Args[1] {
+                       break
+               }
+               mem := x.Args[2]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreconstidx1)
+               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+               v.Aux = s
+               v.AddArg(p)
+               v.AddArg(i)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       return false
+}
+func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
+       // cond:
+       // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               x := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
+               c := v_0.AuxInt
                ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               idx := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVWstoreconstidx2)
+               v.AuxInt = ValAndOff(x).add(c)
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
+       // cond:
+       // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
+       for {
+               x := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ADDQconst {
                        break
                }
-               v.reset(OpAMD64MOVLloadidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpAMD64MOVWstoreconstidx2)
+               v.AuxInt = ValAndOff(x).add(2 * c)
+               v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
+       // cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
+       // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ4 {
+               c := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               i := v.Args[1]
+               x := v.Args[2]
+               if x.Op != OpAMD64MOVWstoreconstidx2 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               a := x.AuxInt
+               if x.Aux != s {
                        break
                }
-               v.reset(OpAMD64MOVLloadidx4)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVLloadidx1 [off] {sym} ptr idx mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               if p != x.Args[0] {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(ptr.Op != OpSB) {
+               if i != x.Args[1] {
                        break
                }
-               v.reset(OpAMD64MOVLloadidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
+               mem := x.Args[2]
+               if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreconstidx1)
+               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
+               v.Aux = s
+               v.AddArg(p)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type)
+               v0.AuxInt = 1
+               v0.AddArg(i)
+               v.AddArg(v0)
                v.AddArg(mem)
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
+       // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
        // cond:
-       // result: (MOVLloadidx4 [c] {sym} ptr idx mem)
+       // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
        for {
                c := v.AuxInt
                sym := v.Aux
@@ -7016,22 +8681,24 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool {
                if v_1.Op != OpAMD64SHLQconst {
                        break
                }
-               if v_1.AuxInt != 2 {
+               if v_1.AuxInt != 1 {
                        break
                }
                idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVLloadidx4)
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVWstoreidx2)
                v.AuxInt = c
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
+       // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
        // cond:
-       // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
                c := v.AuxInt
                sym := v.Aux
@@ -7042,18 +8709,20 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool {
                d := v_0.AuxInt
                ptr := v_0.Args[0]
                idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVLloadidx1)
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVWstoreidx1)
                v.AuxInt = c + d
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
+       // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
        // cond:
-       // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
        for {
                c := v.AuxInt
                sym := v.Aux
@@ -7064,287 +8733,241 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool {
                }
                d := v_1.AuxInt
                idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVLloadidx1)
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVWstoreidx1)
                v.AuxInt = c + d
                v.Aux = sym
                v.AddArg(ptr)
                v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
-       // cond:
-       // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
+       // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpAMD64SHRQconst {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVLloadidx4)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
+               if v_2.AuxInt != 16 {
+                       break
+               }
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != OpAMD64MOVWstoreidx1 {
+                       break
+               }
+               if x.AuxInt != i-2 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if idx != x.Args[1] {
+                       break
+               }
+               if w != x.Args[2] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreidx1)
+               v.AuxInt = i - 2
+               v.Aux = s
+               v.AddArg(p)
                v.AddArg(idx)
+               v.AddArg(w)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
-       // cond:
-       // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
+       // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpAMD64SHRQconst {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVLloadidx4)
-               v.AuxInt = c + 4*d
-               v.Aux = sym
-               v.AddArg(ptr)
+               j := v_2.AuxInt
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != OpAMD64MOVWstoreidx1 {
+                       break
+               }
+               if x.AuxInt != i-2 {
+                       break
+               }
+               if x.Aux != s {
+                       break
+               }
+               if p != x.Args[0] {
+                       break
+               }
+               if idx != x.Args[1] {
+                       break
+               }
+               w0 := x.Args[2]
+               if w0.Op != OpAMD64SHRQconst {
+                       break
+               }
+               if w0.AuxInt != j-16 {
+                       break
+               }
+               if w != w0.Args[0] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
+                       break
+               }
+               v.reset(OpAMD64MOVLstoreidx1)
+               v.AuxInt = i - 2
+               v.Aux = s
+               v.AddArg(p)
                v.AddArg(idx)
+               v.AddArg(w0)
                v.AddArg(mem)
                return true
        }
        return false
 }
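
The function above first folds an ADDQconst on either address operand into the
store's displacement (the c+d rules), then merges a pair of adjacent 16-bit
indexed stores, one of the low half of w and one of (SHRQconst [16] w), into a
single 32-bit store. The merge is only sound because AMD64 is little-endian and
because the overwritten store has no other uses (the x.Uses == 1 && clobber(x)
condition). A minimal standalone sketch of the equivalence, with illustrative
values:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    func main() {
        const i = 2 // offset of the high halfword; the low halfword sits at i-2
        w := uint32(0xCAFEBABE)

        // Before: (MOVWstoreidx1 [i-2] w) then (MOVWstoreidx1 [i] (SHRQconst [16] w)).
        a := make([]byte, 4)
        binary.LittleEndian.PutUint16(a[i-2:], uint16(w))
        binary.LittleEndian.PutUint16(a[i:], uint16(w>>16))

        // After: a single (MOVLstoreidx1 [i-2] w).
        b := make([]byte, 4)
        binary.LittleEndian.PutUint32(b[i-2:], w)

        fmt.Println(bytes.Equal(a, b)) // true
    }
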
-func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
-       // cond:
-       // result: (MOVLstore [off] {sym} ptr x mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLQSX {
-                       break
-               }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVLstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(x)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
+       // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
        // cond:
-       // result: (MOVLstore [off] {sym} ptr x mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLQZX {
-                       break
-               }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVLstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(x)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVLstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVLstore  [off1+off2] {sym} ptr val mem)
+       // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
        for {
-               off1 := v.AuxInt
+               c := v.AuxInt
                sym := v.Aux
                v_0 := v.Args[0]
                if v_0.Op != OpAMD64ADDQconst {
                        break
                }
-               off2 := v_0.AuxInt
+               d := v_0.AuxInt
                ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1 + off2)) {
-                       break
-               }
-               v.reset(OpAMD64MOVLstore)
-               v.AuxInt = off1 + off2
+               idx := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVWstoreidx2)
+               v.AuxInt = c + d
                v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(idx)
                v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
-       // cond: validOff(off)
-       // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
+       // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
+       // cond:
+       // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
        for {
-               off := v.AuxInt
+               c := v.AuxInt
                sym := v.Aux
                ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               mem := v.Args[2]
-               if !(validOff(off)) {
+               if v_1.Op != OpAMD64ADDQconst {
                        break
                }
-               v.reset(OpAMD64MOVLstoreconst)
-               v.AuxInt = makeValAndOff(int64(int32(c)), off)
+               d := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpAMD64MOVWstoreidx2)
+               v.AuxInt = c + 2*d
                v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+       // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
+       // cond: x.Uses == 1   && clobber(x)
+       // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               i := v.AuxInt
+               s := v.Aux
+               p := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpAMD64SHRQconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v_2.AuxInt != 16 {
                        break
                }
-               v.reset(OpAMD64MOVLstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != OpAMD64MOVWstoreidx2 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if x.AuxInt != i-2 {
                        break
                }
-               v.reset(OpAMD64MOVLstoreidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ4 {
+               if x.Aux != s {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if p != x.Args[0] {
                        break
                }
-               v.reset(OpAMD64MOVLstoreidx4)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               if idx != x.Args[1] {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(ptr.Op != OpSB) {
+               if w != x.Args[2] {
+                       break
+               }
+               mem := x.Args[3]
+               if !(x.Uses == 1 && clobber(x)) {
                        break
                }
                v.reset(OpAMD64MOVLstoreidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
+               v.AuxInt = i - 2
+               v.Aux = s
+               v.AddArg(p)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
+               v0.AuxInt = 1
+               v0.AddArg(idx)
+               v.AddArg(v0)
+               v.AddArg(w)
                v.AddArg(mem)
                return true
        }
-       // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
+       // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
        // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVQstore [i-4] {s} p w mem)
+       // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
        for {
                i := v.AuxInt
                s := v.Aux
                p := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHRQconst {
-                       break
-               }
-               if v_1.AuxInt != 32 {
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpAMD64SHRQconst {
                        break
                }
-               w := v_1.Args[0]
-               x := v.Args[2]
-               if x.Op != OpAMD64MOVLstore {
+               j := v_2.AuxInt
+               w := v_2.Args[0]
+               x := v.Args[3]
+               if x.Op != OpAMD64MOVWstoreidx2 {
                        break
                }
-               if x.AuxInt != i-4 {
+               if x.AuxInt != i-2 {
                        break
                }
                if x.Aux != s {
@@ -7353,4134 +8976,2860 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
                if p != x.Args[0] {
                        break
                }
-               if w != x.Args[1] {
+               if idx != x.Args[1] {
                        break
                }
-               mem := x.Args[2]
+               w0 := x.Args[2]
+               if w0.Op != OpAMD64SHRQconst {
+                       break
+               }
+               if w0.AuxInt != j-16 {
+                       break
+               }
+               if w != w0.Args[0] {
+                       break
+               }
+               mem := x.Args[3]
                if !(x.Uses == 1 && clobber(x)) {
                        break
                }
-               v.reset(OpAMD64MOVQstore)
-               v.AuxInt = i - 4
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(w)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLstoreidx1)
+               v.AuxInt = i - 2
+               v.Aux = s
+               v.AddArg(p)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
+               v0.AuxInt = 1
+               v0.AddArg(idx)
+               v.AddArg(v0)
+               v.AddArg(w0)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
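
MOVWstoreidx2 gets the same treatment, with one twist: its address is
p + 2*idx + aux, and the merged 32-bit store is emitted at scale 1, so the
index is doubled with (SHLQconst <idx.Type> [1] idx) to keep the address
unchanged. A sketch of that address identity, using illustrative helper names:

    package main

    import "fmt"

    // addr2 models the address of (MOVWstoreidx2 [off] p idx): p + 2*idx + off.
    func addr2(p, idx, off int64) int64 { return p + 2*idx + off }

    // addr1 models (MOVLstoreidx1 [off] p idx'): p + idx' + off, with idx' = idx<<1.
    func addr1(p, idx, off int64) int64 { return p + (idx << 1) + off }

    func main() {
        p, idx, i := int64(0x1000), int64(7), int64(6)
        fmt.Println(addr2(p, idx, i-2) == addr1(p, idx, i-2)) // true: the rewrite preserves the address
    }
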
+func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MULL x (MOVLconst [c]))
+       // cond:
+       // result: (MULLconst [c] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpAMD64MULLconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULL (MOVLconst [c]) x)
+       // cond:
+       // result: (MULLconst [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpAMD64MULLconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
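
From here the diff moves into arithmetic simplification. MULL is commutative,
so both operand orders are normalized into MULLconst, and every later multiply
optimization only needs to handle the constant-in-AuxInt shape. A toy
canonicalizer in the same spirit (the expr type and names are invented for
illustration):

    package main

    import "fmt"

    type expr struct {
        op   string
        c    int32 // constant payload when op == "const"
        args []expr
    }

    // canonMul rewrites mul(x, const c) and mul(const c, x) to mulconst(c, x),
    // mirroring the two MULL rules above.
    func canonMul(v expr) expr {
        if v.op != "mul" {
            return v
        }
        x, y := v.args[0], v.args[1]
        if y.op == "const" {
            return expr{op: "mulconst", c: y.c, args: []expr{x}}
        }
        if x.op == "const" {
            return expr{op: "mulconst", c: x.c, args: []expr{y}}
        }
        return v
    }

    func main() {
        x := expr{op: "var"}
        k := expr{op: "const", c: 3}
        fmt.Println(canonMul(expr{op: "mul", args: []expr{x, k}}).op) // mulconst
        fmt.Println(canonMul(expr{op: "mul", args: []expr{k, x}}).op) // mulconst
    }
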
+func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MULLconst [c] (MULLconst [d] x))
+       // cond:
+       // result: (MULLconst [int64(int32(c * d))] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MULLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpAMD64MULLconst)
+               v.AuxInt = int64(int32(c * d))
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULLconst [c] (MOVLconst [d]))
+       // cond:
+       // result: (MOVLconst [int64(int32(c*d))])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = int64(int32(c * d))
                return true
        }
-       // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVQstore [i-4] {s} p w0 mem)
+       return false
+}
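
When both operands of a 32-bit multiply are constants, the fold computes in
int64 (AuxInt's type) and then truncates through int32 so the result wraps
exactly as a 32-bit multiply would. A standalone example of why the truncation
matters:

    package main

    import "fmt"

    func main() {
        c, d := int64(0x10000), int64(0x10000) // 65536 * 65536
        fmt.Println(c * d)               // 4294967296: the untruncated product
        fmt.Println(int64(int32(c * d))) // 0: what (MULLconst [c] (MOVLconst [d])) must fold to
    }
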
+func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MULQ x (MOVQconst [c]))
+       // cond: is32Bit(c)
+       // result: (MULQconst [c] x)
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHRQconst {
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               j := v_1.AuxInt
-               w := v_1.Args[0]
-               x := v.Args[2]
-               if x.Op != OpAMD64MOVLstore {
+               c := v_1.AuxInt
+               if !(is32Bit(c)) {
                        break
                }
-               if x.AuxInt != i-4 {
+               v.reset(OpAMD64MULQconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULQ (MOVQconst [c]) x)
+       // cond: is32Bit(c)
+       // result: (MULQconst [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
-               if x.Aux != s {
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(is32Bit(c)) {
                        break
                }
-               if p != x.Args[0] {
+               v.reset(OpAMD64MULQconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
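
The 64-bit variant adds an is32Bit guard: x86-64 multiply immediates are
sign-extended 32-bit values, so a constant that does not fit must stay in a
register. A sketch of the predicate, which has the natural shape below (the
real helper lives elsewhere in the ssa package):

    package main

    import (
        "fmt"
        "math"
    )

    // is32Bit reports whether n is representable as a sign-extended 32-bit
    // immediate; this is the guard used by the MULQ rules above.
    func is32Bit(n int64) bool {
        return n == int64(int32(n))
    }

    func main() {
        fmt.Println(is32Bit(math.MaxInt32))     // true: becomes MULQconst
        fmt.Println(is32Bit(math.MaxInt32 + 1)) // false: stays a MULQ
    }
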
+func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MULQconst [c] (MULQconst [d] x))
+       // cond:
+       // result: (MULQconst [c * d] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MULQconst {
                        break
                }
-               w0 := x.Args[1]
-               if w0.Op != OpAMD64SHRQconst {
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpAMD64MULQconst)
+               v.AuxInt = c * d
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULQconst [-1] x)
+       // cond:
+       // result: (NEGQ x)
+       for {
+               if v.AuxInt != -1 {
                        break
                }
-               if w0.AuxInt != j-32 {
+               x := v.Args[0]
+               v.reset(OpAMD64NEGQ)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULQconst [0] _)
+       // cond:
+       // result: (MOVQconst [0])
+       for {
+               if v.AuxInt != 0 {
                        break
                }
-               if w != w0.Args[0] {
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (MULQconst [1] x)
+       // cond:
+       // result: x
+       for {
+               if v.AuxInt != 1 {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULQconst [3] x)
+       // cond:
+       // result: (LEAQ2 x x)
+       for {
+               if v.AuxInt != 3 {
                        break
                }
-               v.reset(OpAMD64MOVQstore)
-               v.AuxInt = i - 4
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(w0)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpAMD64LEAQ2)
+               v.AddArg(x)
+               v.AddArg(x)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
-       // cond: ValAndOff(sc).canAdd(off)
-       // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+       // match: (MULQconst [5] x)
+       // cond:
+       // result: (LEAQ4 x x)
        for {
-               sc := v.AuxInt
-               s := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v.AuxInt != 5 {
                        break
                }
-               off := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(ValAndOff(sc).canAdd(off)) {
+               x := v.Args[0]
+               v.reset(OpAMD64LEAQ4)
+               v.AddArg(x)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULQconst [7] x)
+       // cond:
+       // result: (LEAQ8 (NEGQ <v.Type> x) x)
+       for {
+               if v.AuxInt != 7 {
                        break
                }
-               v.reset(OpAMD64MOVLstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = s
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpAMD64LEAQ8)
+               v0 := b.NewValue0(v.Line, OpAMD64NEGQ, v.Type)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-       // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+       // match: (MULQconst [9] x)
+       // cond:
+       // result: (LEAQ8 x x)
        for {
-               sc := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if v.AuxInt != 9 {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+               x := v.Args[0]
+               v.reset(OpAMD64LEAQ8)
+               v.AddArg(x)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULQconst [11] x)
+       // cond:
+       // result: (LEAQ2 x (LEAQ4 <v.Type> x x))
+       for {
+               if v.AuxInt != 11 {
                        break
                }
-               v.reset(OpAMD64MOVLstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpAMD64LEAQ2)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
-       // cond: canMergeSym(sym1, sym2)
-       // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (MULQconst [13] x)
+       // cond:
+       // result: (LEAQ4 x (LEAQ2 <v.Type> x x))
        for {
-               x := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               if v.AuxInt != 13 {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               x := v.Args[0]
+               v.reset(OpAMD64LEAQ4)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MULQconst [21] x)
+       // cond:
+       // result: (LEAQ4 x (LEAQ4 <v.Type> x x))
+       for {
+               if v.AuxInt != 21 {
                        break
                }
-               v.reset(OpAMD64MOVLstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpAMD64LEAQ4)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MULQconst [25] x)
+       // cond:
+       // result: (LEAQ8 x (LEAQ2 <v.Type> x x))
+       for {
+               if v.AuxInt != 25 {
+                       break
+               }
+               x := v.Args[0]
+               v.reset(OpAMD64LEAQ8)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
-       // cond: canMergeSym(sym1, sym2)
-       // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (MULQconst [37] x)
+       // cond:
+       // result: (LEAQ4 x (LEAQ8 <v.Type> x x))
        for {
-               x := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ4 {
+               if v.AuxInt != 37 {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               x := v.Args[0]
+               v.reset(OpAMD64LEAQ4)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MULQconst [41] x)
+       // cond:
+       // result: (LEAQ8 x (LEAQ4 <v.Type> x x))
+       for {
+               if v.AuxInt != 41 {
                        break
                }
-               v.reset(OpAMD64MOVLstoreconstidx4)
-               v.AuxInt = ValAndOff(x).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpAMD64LEAQ8)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
+       // match: (MULQconst [73] x)
        // cond:
-       // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
+       // result: (LEAQ8 x (LEAQ8 <v.Type> x x))
        for {
-               x := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               if v.AuxInt != 73 {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               v.reset(OpAMD64MOVLstoreconstidx1)
-               v.AuxInt = x
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpAMD64LEAQ8)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
+       // match: (MULQconst [c] x)
+       // cond: isPowerOfTwo(c)
+       // result: (SHLQconst [log2(c)] x)
        for {
                c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               x := v.Args[1]
-               if x.Op != OpAMD64MOVLstoreconst {
+               x := v.Args[0]
+               if !(isPowerOfTwo(c)) {
                        break
                }
-               a := x.AuxInt
-               if x.Aux != s {
+               v.reset(OpAMD64SHLQconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULQconst [c] x)
+       // cond: isPowerOfTwo(c+1) && c >= 15
+       // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
+       for {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(isPowerOfTwo(c+1) && c >= 15) {
                        break
                }
-               if p != x.Args[0] {
+               v.reset(OpAMD64SUBQ)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
+               v0.AuxInt = log2(c + 1)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULQconst [c] x)
+       // cond: isPowerOfTwo(c-1) && c >= 17
+       // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
+       for {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(isPowerOfTwo(c-1) && c >= 17) {
                        break
                }
-               mem := x.Args[1]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
+               v.reset(OpAMD64LEAQ1)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
+               v0.AuxInt = log2(c - 1)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULQconst [c] x)
+       // cond: isPowerOfTwo(c-2) && c >= 34
+       // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
+       for {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(isPowerOfTwo(c-2) && c >= 34) {
                        break
                }
-               v.reset(OpAMD64MOVQstore)
-               v.AuxInt = ValAndOff(a).Off()
-               v.Aux = s
-               v.AddArg(p)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-               v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
+               v.reset(OpAMD64LEAQ2)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
+               v0.AuxInt = log2(c - 2)
+               v0.AddArg(x)
                v.AddArg(v0)
-               v.AddArg(mem)
+               v.AddArg(x)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
-       // cond:
-       // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
+       // match: (MULQconst [c] x)
+       // cond: isPowerOfTwo(c-4) && c >= 68
+       // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
        for {
                c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
+               x := v.Args[0]
+               if !(isPowerOfTwo(c-4) && c >= 68) {
                        break
                }
-               if v_1.AuxInt != 2 {
+               v.reset(OpAMD64LEAQ4)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
+               v0.AuxInt = log2(c - 4)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(x)
+               return true
+       }
+       // match: (MULQconst [c] x)
+       // cond: isPowerOfTwo(c-8) && c >= 136
+       // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
+       for {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(isPowerOfTwo(c-8) && c >= 136) {
                        break
                }
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVLstoreconstidx4)
-               v.AuxInt = c
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpAMD64LEAQ8)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
+               v0.AuxInt = log2(c - 8)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
-       // cond:
-       // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // match: (MULQconst [c] x)
+       // cond: c%3 == 0 && isPowerOfTwo(c/3)
+       // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
        for {
-               x := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(c%3 == 0 && isPowerOfTwo(c/3)) {
                        break
                }
-               c := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVLstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpAMD64SHLQconst)
+               v.AuxInt = log2(c / 3)
+               v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
-       // cond:
-       // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // match: (MULQconst [c] x)
+       // cond: c%5 == 0 && isPowerOfTwo(c/5)
+       // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
        for {
-               x := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(c%5 == 0 && isPowerOfTwo(c/5)) {
                        break
                }
-               c := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVLstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpAMD64SHLQconst)
+               v.AuxInt = log2(c / 5)
+               v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
+       // match: (MULQconst [c] x)
+       // cond: c%9 == 0 && isPowerOfTwo(c/9)
+       // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
        for {
                c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               i := v.Args[1]
-               x := v.Args[2]
-               if x.Op != OpAMD64MOVLstoreconstidx1 {
-                       break
-               }
-               a := x.AuxInt
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               if i != x.Args[1] {
+               x := v.Args[0]
+               if !(c%9 == 0 && isPowerOfTwo(c/9)) {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
+               v.reset(OpAMD64SHLQconst)
+               v.AuxInt = log2(c / 9)
+               v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (MULQconst [c] (MOVQconst [d]))
+       // cond:
+       // result: (MOVQconst [c*d])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
-               v.reset(OpAMD64MOVQstoreidx1)
-               v.AuxInt = ValAndOff(a).Off()
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(i)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-               v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
-               v.AddArg(v0)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = c * d
                return true
        }
        return false
 }
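
The MULQconst rules above are classic strength reduction: LEAQ1, LEAQ2, LEAQ4
and LEAQ8 compute x + 1*y, x + 2*y, x + 4*y and x + 8*y in one instruction, so
small constants become one or two LEAs, powers of two become shifts, and
near-powers-of-two combine a shift with a SUBQ or an LEA. Two of the identities,
checked directly (the helper names are illustrative):

    package main

    import "fmt"

    // leaq2 and leaq4 model the forms (LEAQ2 x y) = x + 2*y and
    // (LEAQ4 x y) = x + 4*y used by the MULQconst rules.
    func leaq2(x, y int64) int64 { return x + 2*y }
    func leaq4(x, y int64) int64 { return x + 4*y }

    func main() {
        x := int64(123)
        fmt.Println(leaq2(x, leaq4(x, x)) == 11*x) // (MULQconst [11] x) => (LEAQ2 x (LEAQ4 x x))
        fmt.Println(leaq4(x, leaq2(x, x)) == 13*x) // (MULQconst [13] x) => (LEAQ4 x (LEAQ2 x x))
    }
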
-func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
+       // match: (NEGL (MOVLconst [c]))
        // cond:
-       // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // result: (MOVLconst [int64(int32(-c))])
        for {
-               x := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64MOVLconst {
                        break
                }
                c := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVLstoreconstidx4)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = int64(int32(-c))
                return true
        }
-       // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
+       return false
+}
+func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NEGQ (MOVQconst [c]))
        // cond:
-       // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
+       // result: (MOVQconst [-c])
        for {
-               x := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
-               c := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVLstoreconstidx4)
-               v.AuxInt = ValAndOff(x).add(4 * c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = -c
                return true
        }
-       // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
+       return false
+}
+func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NOTL (MOVLconst [c]))
+       // cond:
+       // result: (MOVLconst [^c])
        for {
-               c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               i := v.Args[1]
-               x := v.Args[2]
-               if x.Op != OpAMD64MOVLstoreconstidx4 {
-                       break
-               }
-               a := x.AuxInt
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               if i != x.Args[1] {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
+               c := v_0.AuxInt
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = ^c
+               return true
+       }
+       return false
+}
+func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NOTQ (MOVQconst [c]))
+       // cond:
+       // result: (MOVQconst [^c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
-               v.reset(OpAMD64MOVQstoreidx1)
-               v.AuxInt = ValAndOff(a).Off()
-               v.Aux = s
-               v.AddArg(p)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type)
-               v0.AuxInt = 2
-               v0.AddArg(i)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-               v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
-               v.AddArg(v1)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = ^c
                return true
        }
        return false
 }
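
NEGL, NEGQ, NOTL and NOTQ all fold a constant operand in place; the only
subtlety is that the 32-bit forms re-truncate through int32 while the 64-bit
forms use the full AuxInt. The difference is visible at the 32-bit boundary:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        c := int64(math.MinInt32)
        fmt.Println(int64(int32(-c))) // -2147483648: (NEGL (MOVLconst [c])) wraps at 32 bits
        fmt.Println(-c)               // 2147483648:  (NEGQ (MOVQconst [c])) does not
        fmt.Println(^c)               // 2147483647:  (NOTQ (MOVQconst [c])) folds to [^c]
    }
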
-func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
+       // match: (ORL x (MOVLconst [c]))
        // cond:
-       // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
+       // result: (ORLconst [c] x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if v_1.AuxInt != 2 {
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVLstoreidx4)
+               c := v_1.AuxInt
+               v.reset(OpAMD64ORLconst)
                v.AuxInt = c
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
+       // match: (ORL (MOVLconst [c]) x)
        // cond:
-       // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+       // result: (ORLconst [c] x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64MOVLconst {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVLstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpAMD64ORLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
+       // match: (ORL x x)
        // cond:
-       // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
+       // result: x
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               x := v.Args[0]
+               if x != v.Args[1] {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVLstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
+       // match: (ORL                  x0:(MOVBload [i]   {s} p mem)     s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
+       // cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
+       // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpAMD64SHRQconst {
+               x0 := v.Args[0]
+               if x0.Op != OpAMD64MOVBload {
                        break
                }
-               if v_2.AuxInt != 32 {
+               i := x0.AuxInt
+               s := x0.Aux
+               p := x0.Args[0]
+               mem := x0.Args[1]
+               s0 := v.Args[1]
+               if s0.Op != OpAMD64SHLLconst {
                        break
                }
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != OpAMD64MOVLstoreidx1 {
+               if s0.AuxInt != 8 {
                        break
                }
-               if x.AuxInt != i-4 {
+               x1 := s0.Args[0]
+               if x1.Op != OpAMD64MOVBload {
                        break
                }
-               if x.Aux != s {
+               if x1.AuxInt != i+1 {
                        break
                }
-               if p != x.Args[0] {
+               if x1.Aux != s {
                        break
                }
-               if idx != x.Args[1] {
+               if p != x1.Args[0] {
                        break
                }
-               if w != x.Args[2] {
+               if mem != x1.Args[1] {
                        break
                }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
                        break
                }
-               v.reset(OpAMD64MOVQstoreidx1)
-               v.AuxInt = i - 4
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(idx)
-               v.AddArg(w)
-               v.AddArg(mem)
+               b = mergePoint(b, x0, x1)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = i
+               v0.Aux = s
+               v0.AddArg(p)
+               v0.AddArg(mem)
                return true
        }
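
The ORL rules here and below do the inverse of the store merging earlier in
this file: they recognize the little-endian idiom uint16(p[i]) |
uint16(p[i+1])<<8 (and its 32-bit extension with the o0/o1 chains) and replace
the byte loads with one wider load. mergePoint picks a block where all the
inputs are available, and the Uses == 1 plus clobber conditions guarantee the
narrow loads and shifts die afterwards. A sketch of the source pattern being
matched, assuming little-endian layout (true on AMD64):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // readU16 is the source-level idiom the first ORL rule matches:
    // (ORL x0:(MOVBload [i]) (SHLLconst [8] x1:(MOVBload [i+1]))).
    func readU16(p []byte, i int) uint16 {
        return uint16(p[i]) | uint16(p[i+1])<<8
    }

    func main() {
        p := []byte{0xBE, 0xBA, 0xFE, 0xCA}
        fmt.Println(readU16(p, 0) == binary.LittleEndian.Uint16(p)) // true: one MOVWload suffices
    }
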
-       // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
+       // match: (ORL o0:(ORL o1:(ORL                        x0:(MOVBload [i]   {s} p mem)     s0:(SHLLconst [8]  x1:(MOVBload [i+1] {s} p mem)))     s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem)))     s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem)))
+       // cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && mergePoint(b,x0,x1,x2,x3) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(o0)   && clobber(o1)
+       // result: @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem)
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpAMD64SHRQconst {
+               o0 := v.Args[0]
+               if o0.Op != OpAMD64ORL {
                        break
                }
-               j := v_2.AuxInt
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != OpAMD64MOVLstoreidx1 {
+               o1 := o0.Args[0]
+               if o1.Op != OpAMD64ORL {
                        break
                }
-               if x.AuxInt != i-4 {
+               x0 := o1.Args[0]
+               if x0.Op != OpAMD64MOVBload {
                        break
                }
-               if x.Aux != s {
+               i := x0.AuxInt
+               s := x0.Aux
+               p := x0.Args[0]
+               mem := x0.Args[1]
+               s0 := o1.Args[1]
+               if s0.Op != OpAMD64SHLLconst {
                        break
                }
-               if p != x.Args[0] {
+               if s0.AuxInt != 8 {
                        break
                }
-               if idx != x.Args[1] {
+               x1 := s0.Args[0]
+               if x1.Op != OpAMD64MOVBload {
                        break
                }
-               w0 := x.Args[2]
-               if w0.Op != OpAMD64SHRQconst {
+               if x1.AuxInt != i+1 {
                        break
                }
-               if w0.AuxInt != j-32 {
+               if x1.Aux != s {
                        break
                }
-               if w != w0.Args[0] {
+               if p != x1.Args[0] {
                        break
                }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               if mem != x1.Args[1] {
                        break
                }
-               v.reset(OpAMD64MOVQstoreidx1)
-               v.AuxInt = i - 4
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(idx)
-               v.AddArg(w0)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-       // cond:
-       // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               s1 := o0.Args[1]
+               if s1.Op != OpAMD64SHLLconst {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVLstoreidx4)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-       // cond:
-       // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               if s1.AuxInt != 16 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVLstoreidx4)
-               v.AuxInt = c + 4*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
-       for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpAMD64SHRQconst {
+               x2 := s1.Args[0]
+               if x2.Op != OpAMD64MOVBload {
                        break
                }
-               if v_2.AuxInt != 32 {
+               if x2.AuxInt != i+2 {
                        break
                }
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != OpAMD64MOVLstoreidx4 {
+               if x2.Aux != s {
                        break
                }
-               if x.AuxInt != i-4 {
+               if p != x2.Args[0] {
                        break
                }
-               if x.Aux != s {
+               if mem != x2.Args[1] {
                        break
                }
-               if p != x.Args[0] {
+               s2 := v.Args[1]
+               if s2.Op != OpAMD64SHLLconst {
                        break
                }
-               if idx != x.Args[1] {
+               if s2.AuxInt != 24 {
                        break
                }
-               if w != x.Args[2] {
+               x3 := s2.Args[0]
+               if x3.Op != OpAMD64MOVBload {
                        break
                }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               if x3.AuxInt != i+3 {
                        break
                }
-               v.reset(OpAMD64MOVQstoreidx1)
-               v.AuxInt = i - 4
-               v.Aux = s
-               v.AddArg(p)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
-               v0.AuxInt = 2
-               v0.AddArg(idx)
+               if x3.Aux != s {
+                       break
+               }
+               if p != x3.Args[0] {
+                       break
+               }
+               if mem != x3.Args[1] {
+                       break
+               }
+               if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
+                       break
+               }
+               b = mergePoint(b, x0, x1, x2, x3)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+               v.reset(OpCopy)
                v.AddArg(v0)
-               v.AddArg(w)
-               v.AddArg(mem)
+               v0.AuxInt = i
+               v0.Aux = s
+               v0.AddArg(p)
+               v0.AddArg(mem)
                return true
        }
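The rule that closes here merges four adjacent byte loads, shifted into place and ORed together, into a single 32-bit load. A sketch (not from this CL) of the Go-level idiom whose SSA takes roughly that MOVBload/SHLLconst/ORL shape:

	// load32 assembles a little-endian uint32 from four adjacent bytes;
	// once this rule fires, the four byte loads become one MOVL load.
	func load32(b []byte, i int) uint32 {
		return uint32(b[i]) | uint32(b[i+1])<<8 |
			uint32(b[i+2])<<16 | uint32(b[i+3])<<24
	}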
-       // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
+       // match: (ORL                  x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
+       // cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
+       // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpAMD64SHRQconst {
+               x0 := v.Args[0]
+               if x0.Op != OpAMD64MOVBloadidx1 {
                        break
                }
-               j := v_2.AuxInt
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != OpAMD64MOVLstoreidx4 {
+               i := x0.AuxInt
+               s := x0.Aux
+               p := x0.Args[0]
+               idx := x0.Args[1]
+               mem := x0.Args[2]
+               s0 := v.Args[1]
+               if s0.Op != OpAMD64SHLLconst {
                        break
                }
-               if x.AuxInt != i-4 {
+               if s0.AuxInt != 8 {
                        break
                }
-               if x.Aux != s {
+               x1 := s0.Args[0]
+               if x1.Op != OpAMD64MOVBloadidx1 {
+                       break
+               }
+               if x1.AuxInt != i+1 {
+                       break
+               }
+               if x1.Aux != s {
+                       break
+               }
+               if p != x1.Args[0] {
+                       break
+               }
+               if idx != x1.Args[1] {
+                       break
+               }
+               if mem != x1.Args[2] {
+                       break
+               }
+               if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
+                       break
+               }
+               b = mergePoint(b, x0, x1)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = i
+               v0.Aux = s
+               v0.AddArg(p)
+               v0.AddArg(idx)
+               v0.AddArg(mem)
+               return true
+       }
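Same idea one size down, for the indexed form: two MOVBloadidx1 loads at i and i+1 become one MOVWloadidx1. The matching source shape, as a sketch:

	// load16 is the two-byte little-endian analogue; the SHLLconst [8]
	// plus ORL pair above is its SSA form.
	func load16(b []byte, i int) uint16 {
		return uint16(b[i]) | uint16(b[i+1])<<8
	}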
+       // match: (ORL o0:(ORL o1:(ORL                        x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [8]  x1:(MOVBloadidx1 [i+1] {s} p idx mem)))     s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))     s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
+       // cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && mergePoint(b,x0,x1,x2,x3) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(o0)   && clobber(o1)
+       // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
+       for {
+               o0 := v.Args[0]
+               if o0.Op != OpAMD64ORL {
                        break
                }
-               if p != x.Args[0] {
+               o1 := o0.Args[0]
+               if o1.Op != OpAMD64ORL {
                        break
                }
-               if idx != x.Args[1] {
+               x0 := o1.Args[0]
+               if x0.Op != OpAMD64MOVBloadidx1 {
                        break
                }
-               w0 := x.Args[2]
-               if w0.Op != OpAMD64SHRQconst {
+               i := x0.AuxInt
+               s := x0.Aux
+               p := x0.Args[0]
+               idx := x0.Args[1]
+               mem := x0.Args[2]
+               s0 := o1.Args[1]
+               if s0.Op != OpAMD64SHLLconst {
                        break
                }
-               if w0.AuxInt != j-32 {
+               if s0.AuxInt != 8 {
                        break
                }
-               if w != w0.Args[0] {
+               x1 := s0.Args[0]
+               if x1.Op != OpAMD64MOVBloadidx1 {
                        break
                }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               if x1.AuxInt != i+1 {
                        break
                }
-               v.reset(OpAMD64MOVQstoreidx1)
-               v.AuxInt = i - 4
-               v.Aux = s
-               v.AddArg(p)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
-               v0.AuxInt = 2
-               v0.AddArg(idx)
-               v.AddArg(v0)
-               v.AddArg(w0)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVOload  [off1] {sym} (ADDQconst [off2] ptr) mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVOload  [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if x1.Aux != s {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1 + off2)) {
+               if p != x1.Args[0] {
                        break
                }
-               v.reset(OpAMD64MOVOload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if idx != x1.Args[1] {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if mem != x1.Args[2] {
                        break
                }
-               v.reset(OpAMD64MOVOload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVOstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVOstore  [off1+off2] {sym} ptr val mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               s1 := o0.Args[1]
+               if s1.Op != OpAMD64SHLLconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1 + off2)) {
+               if s1.AuxInt != 16 {
                        break
                }
-               v.reset(OpAMD64MOVOstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               x2 := s1.Args[0]
+               if x2.Op != OpAMD64MOVBloadidx1 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if x2.AuxInt != i+2 {
                        break
                }
-               v.reset(OpAMD64MOVOstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-       // result: x
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQstore {
+               if x2.Aux != s {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+               if p != x2.Args[0] {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (MOVQload  [off1] {sym} (ADDQconst [off2] ptr) mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVQload  [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if idx != x2.Args[1] {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1 + off2)) {
+               if mem != x2.Args[2] {
                        break
                }
-               v.reset(OpAMD64MOVQload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               s2 := v.Args[1]
+               if s2.Op != OpAMD64SHLLconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if s2.AuxInt != 24 {
                        break
                }
-               v.reset(OpAMD64MOVQload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               x3 := s2.Args[0]
+               if x3.Op != OpAMD64MOVBloadidx1 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if x3.AuxInt != i+3 {
                        break
                }
-               v.reset(OpAMD64MOVQloadidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ8 {
+               if x3.Aux != s {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if p != x3.Args[0] {
                        break
                }
-               v.reset(OpAMD64MOVQloadidx8)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVQloadidx1 [off] {sym} ptr idx mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               if idx != x3.Args[1] {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(ptr.Op != OpSB) {
+               if mem != x3.Args[2] {
                        break
                }
-               v.reset(OpAMD64MOVQloadidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
+                       break
+               }
+               b = mergePoint(b, x0, x1, x2, x3)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = i
+               v0.Aux = s
+               v0.AddArg(p)
+               v0.AddArg(idx)
+               v0.AddArg(mem)
                return true
        }
        return false
 }
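Each of these generated functions returns true only when it rewired v, which is what lets the rewrite pass run them to a fixed point. Schematically (a sketch, not the actual driver in rewrite.go; f and config are assumed in scope):

	// Keep applying per-op rewrite functions until a full pass over the
	// function changes nothing; values created by one rule are then
	// re-examined by all the others.
	for {
		change := false
		for _, b := range f.Blocks {
			for _, v := range b.Values {
				if rewriteValueAMD64(v, config) {
					change = true
				}
			}
		}
		if !change {
			break
		}
	}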
-func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
-       // cond:
-       // result: (MOVQloadidx8 [c] {sym} ptr idx mem)
+       // match: (ORLconst [c] x)
+       // cond: int32(c)==0
+       // result: x
        for {
                c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if v_1.AuxInt != 3 {
+               x := v.Args[0]
+               if !(int32(c) == 0) {
                        break
                }
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVQloadidx8)
-               v.AuxInt = c
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
-       // cond:
-       // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
+       // match: (ORLconst [c] _)
+       // cond: int32(c)==-1
+       // result: (MOVLconst [-1])
        for {
                c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if !(int32(c) == -1) {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVQloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = -1
                return true
        }
-       // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
+       // match: (ORLconst [c] (MOVLconst [d]))
        // cond:
-       // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (MOVLconst [c|d])
        for {
                c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVQloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = c | d
                return true
        }
        return false
 }
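ORLconst's three rules are the usual identity/absorb/fold trio for bitwise OR, with the comparisons done on the truncated int32 view of AuxInt. From the source level (a sketch; the constant-constant case fires when earlier rewrites materialize a MOVLconst operand, not in code a user would typically write):

	func orlConstExamples(x uint32) (a, b uint32) {
		a = x | 0          // int32(c) == 0:  identity, rewritten to x
		b = x | 0xFFFFFFFF // int32(c) == -1: absorbing, becomes MOVLconst [-1]
		return
	}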
-func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
-       // cond:
-       // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
+       // match: (ORQ x (MOVQconst [c]))
+       // cond: is32Bit(c)
+       // result: (ORQconst [c] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVQconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpAMD64ORQconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (ORQ (MOVQconst [c]) x)
+       // cond: is32Bit(c)
+       // result: (ORQconst [c] x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVQloadidx8)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpAMD64ORQconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
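This pair is one rewrite with the operands commuted; at this vintage rulegen has no commutative-match shorthand, so both orders are spelled out in the rules file. Reconstructed from the match/cond/result comments, the source rules presumably read:

	(ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
	(ORQ (MOVQconst [c]) x) && is32Bit(c) -> (ORQconst [c] x)

The is32Bit guard exists because ORQconst encodes a sign-extended 32-bit immediate; the helper in rewrite.go amounts to:

	// is32Bit reports whether n fits in an int32, i.e. survives the
	// truncate-and-sign-extend round trip.
	func is32Bit(n int64) bool {
		return n == int64(int32(n))
	}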
-       // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
+       // match: (ORQ x x)
        // cond:
-       // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
+       // result: x
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               x := v.Args[0]
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
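(ORQ x x) is plain idempotence: x|x == x for any x, so the OR collapses to a copy of its operand, preserving the type. As a one-line sketch:

	func orSelf(x uint64) uint64 { return x | x } // rewritten to just x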
+       // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ                        x0:(MOVBload [i]   {s} p mem)     s0:(SHLQconst [8]  x1:(MOVBload [i+1] {s} p mem)))     s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem)))     s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem)))     s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem)))     s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem)))     s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem)))     s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem)))
+       // cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
+       // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem)
+       for {
+               o0 := v.Args[0]
+               if o0.Op != OpAMD64ORQ {
+                       break
+               }
+               o1 := o0.Args[0]
+               if o1.Op != OpAMD64ORQ {
+                       break
+               }
+               o2 := o1.Args[0]
+               if o2.Op != OpAMD64ORQ {
+                       break
+               }
+               o3 := o2.Args[0]
+               if o3.Op != OpAMD64ORQ {
+                       break
+               }
+               o4 := o3.Args[0]
+               if o4.Op != OpAMD64ORQ {
+                       break
+               }
+               o5 := o4.Args[0]
+               if o5.Op != OpAMD64ORQ {
+                       break
+               }
+               x0 := o5.Args[0]
+               if x0.Op != OpAMD64MOVBload {
+                       break
+               }
+               i := x0.AuxInt
+               s := x0.Aux
+               p := x0.Args[0]
+               mem := x0.Args[1]
+               s0 := o5.Args[1]
+               if s0.Op != OpAMD64SHLQconst {
+                       break
+               }
+               if s0.AuxInt != 8 {
+                       break
+               }
+               x1 := s0.Args[0]
+               if x1.Op != OpAMD64MOVBload {
+                       break
+               }
+               if x1.AuxInt != i+1 {
+                       break
+               }
+               if x1.Aux != s {
+                       break
+               }
+               if p != x1.Args[0] {
+                       break
+               }
+               if mem != x1.Args[1] {
+                       break
+               }
+               s1 := o4.Args[1]
+               if s1.Op != OpAMD64SHLQconst {
+                       break
+               }
+               if s1.AuxInt != 16 {
+                       break
+               }
+               x2 := s1.Args[0]
+               if x2.Op != OpAMD64MOVBload {
+                       break
+               }
+               if x2.AuxInt != i+2 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVQloadidx8)
-               v.AuxInt = c + 8*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
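The idx1/idx8 rules removed here do two things: fold a constant 3-bit shift of the index into the *8 scaled addressing mode, and accumulate ADDQconst displacements into AuxInt, scaling by 8 when the add is on the already-scaled index. Reconstructed in rules notation from the comments above:

	(MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQloadidx8 [c] {sym} ptr idx mem)
	(MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)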
-func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVQstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVQstore  [off1+off2] {sym} ptr val mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if x2.Aux != s {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1 + off2)) {
+               if p != x2.Args[0] {
                        break
                }
-               v.reset(OpAMD64MOVQstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
-       // cond: validValAndOff(c,off)
-       // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
+               if mem != x2.Args[1] {
                        break
                }
-               c := v_1.AuxInt
-               mem := v.Args[2]
-               if !(validValAndOff(c, off)) {
+               s2 := o3.Args[1]
+               if s2.Op != OpAMD64SHLQconst {
                        break
                }
-               v.reset(OpAMD64MOVQstoreconst)
-               v.AuxInt = makeValAndOff(c, off)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if s2.AuxInt != 24 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               x3 := s2.Args[0]
+               if x3.Op != OpAMD64MOVBload {
                        break
                }
-               v.reset(OpAMD64MOVQstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               if x3.AuxInt != i+3 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if x3.Aux != s {
                        break
                }
-               v.reset(OpAMD64MOVQstoreidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ8 {
+               if p != x3.Args[0] {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if mem != x3.Args[1] {
                        break
                }
-               v.reset(OpAMD64MOVQstoreidx8)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               s3 := o2.Args[1]
+               if s3.Op != OpAMD64SHLQconst {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(ptr.Op != OpSB) {
+               if s3.AuxInt != 32 {
                        break
                }
-               v.reset(OpAMD64MOVQstoreidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
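Among the MOVQstore rules removed above is the constant-store fold: a MOVQconst stored through ptr becomes a MOVQstoreconst, packing both the value and the offset into a single AuxInt so one immediate store instruction can be emitted. In rules notation, from the comments:

	(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) -> (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)

validValAndOff guards that both halves actually fit the packed encoding.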
-func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
-       // cond: ValAndOff(sc).canAdd(off)
-       // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-       for {
-               sc := v.AuxInt
-               s := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               x4 := s3.Args[0]
+               if x4.Op != OpAMD64MOVBload {
                        break
                }
-               off := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(ValAndOff(sc).canAdd(off)) {
+               if x4.AuxInt != i+4 {
                        break
                }
-               v.reset(OpAMD64MOVQstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = s
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-       // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-       for {
-               sc := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if x4.Aux != s {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+               if p != x4.Args[0] {
                        break
                }
-               v.reset(OpAMD64MOVQstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
-       // cond: canMergeSym(sym1, sym2)
-       // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-       for {
-               x := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               if mem != x4.Args[1] {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               s4 := o1.Args[1]
+               if s4.Op != OpAMD64SHLQconst {
                        break
                }
-               v.reset(OpAMD64MOVQstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
-       // cond: canMergeSym(sym1, sym2)
-       // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-       for {
-               x := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ8 {
+               if s4.AuxInt != 40 {
+                       break
+               }
+               x5 := s4.Args[0]
+               if x5.Op != OpAMD64MOVBload {
+                       break
+               }
+               if x5.AuxInt != i+5 {
+                       break
+               }
+               if x5.Aux != s {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               if p != x5.Args[0] {
                        break
                }
-               v.reset(OpAMD64MOVQstoreconstidx8)
-               v.AuxInt = ValAndOff(x).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
-       // cond:
-       // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
-       for {
-               x := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               if mem != x5.Args[1] {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               v.reset(OpAMD64MOVQstoreconstidx1)
-               v.AuxInt = x
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
-       // cond:
-       // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
+               s5 := o0.Args[1]
+               if s5.Op != OpAMD64SHLQconst {
                        break
                }
-               if v_1.AuxInt != 3 {
+               if s5.AuxInt != 48 {
                        break
                }
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVQstoreconstidx8)
-               v.AuxInt = c
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
-       // cond:
-       // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-       for {
-               x := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               x6 := s5.Args[0]
+               if x6.Op != OpAMD64MOVBload {
                        break
                }
-               c := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVQstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
-       // cond:
-       // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-       for {
-               x := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               if x6.AuxInt != i+6 {
                        break
                }
-               c := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVQstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
-       // cond:
-       // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-       for {
-               x := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if x6.Aux != s {
                        break
                }
-               c := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVQstoreconstidx8)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
-       // cond:
-       // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
-       for {
-               x := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               if p != x6.Args[0] {
                        break
                }
-               c := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVQstoreconstidx8)
-               v.AuxInt = ValAndOff(x).add(8 * c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
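The ValAndOff manipulations in these storeconst rules (.add, .canAdd) operate on that packed AuxInt. A reconstruction of the representation, for reading the rules above (value in the high 32 bits, offset in the low 32):

	type ValAndOff int64

	func (x ValAndOff) Val() int64 { return int64(x) >> 32 }
	func (x ValAndOff) Off() int64 { return int64(int32(x)) }

	// add is sketched here as rebuilding the packed form with the
	// offset bumped by off; the real helper also validates the result.
	func (x ValAndOff) add(off int64) int64 {
		return x.Val()<<32 | (x.Off()+off)&0xffffffff
	}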
-func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
-       // cond:
-       // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
+               if mem != x6.Args[1] {
                        break
                }
-               if v_1.AuxInt != 3 {
+               s6 := v.Args[1]
+               if s6.Op != OpAMD64SHLQconst {
                        break
                }
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVQstoreidx8)
-               v.AuxInt = c
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-       // cond:
-       // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if s6.AuxInt != 56 {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVQstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-       // cond:
-       // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               x7 := s6.Args[0]
+               if x7.Op != OpAMD64MOVBload {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVQstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-       // cond:
-       // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if x7.AuxInt != i+7 {
+                       break
+               }
+               if x7.Aux != s {
+                       break
+               }
+               if p != x7.Args[0] {
+                       break
+               }
+               if mem != x7.Args[1] {
+                       break
+               }
+               if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVQstoreidx8)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = i
+               v0.Aux = s
+               v0.AddArg(p)
+               v0.AddArg(mem)
                return true
        }
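And this is the full-width version: eight adjacent byte loads, shifted by 8..56 and ORed through o0..o5, collapse into one 64-bit load. The source shape, which is also what little-endian decoding in the style of encoding/binary expands to (sketch):

	func load64(b []byte, i int) uint64 {
		return uint64(b[i]) | uint64(b[i+1])<<8 | uint64(b[i+2])<<16 |
			uint64(b[i+3])<<24 | uint64(b[i+4])<<32 | uint64(b[i+5])<<40 |
			uint64(b[i+6])<<48 | uint64(b[i+7])<<56
	}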
-       // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-       // cond:
-       // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
+       // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ                        x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLQconst [8]  x1:(MOVBloadidx1 [i+1] {s} p idx mem)))     s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))     s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))     s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem)))     s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem)))     s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem)))     s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem)))
+       // cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
+       // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               o0 := v.Args[0]
+               if o0.Op != OpAMD64ORQ {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVQstoreidx8)
-               v.AuxInt = c + 8*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVSDload [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               o1 := o0.Args[0]
+               if o1.Op != OpAMD64ORQ {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1 + off2)) {
+               o2 := o1.Args[0]
+               if o2.Op != OpAMD64ORQ {
                        break
                }
-               v.reset(OpAMD64MOVSDload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               o3 := o2.Args[0]
+               if o3.Op != OpAMD64ORQ {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               o4 := o3.Args[0]
+               if o4.Op != OpAMD64ORQ {
                        break
                }
-               v.reset(OpAMD64MOVSDload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               o5 := o4.Args[0]
+               if o5.Op != OpAMD64ORQ {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               x0 := o5.Args[0]
+               if x0.Op != OpAMD64MOVBloadidx1 {
                        break
                }
-               v.reset(OpAMD64MOVSDloadidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ8 {
+               i := x0.AuxInt
+               s := x0.Aux
+               p := x0.Args[0]
+               idx := x0.Args[1]
+               mem := x0.Args[2]
+               s0 := o5.Args[1]
+               if s0.Op != OpAMD64SHLQconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if s0.AuxInt != 8 {
                        break
                }
-               v.reset(OpAMD64MOVSDloadidx8)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               x1 := s0.Args[0]
+               if x1.Op != OpAMD64MOVBloadidx1 {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(ptr.Op != OpSB) {
+               if x1.AuxInt != i+1 {
                        break
                }
-               v.reset(OpAMD64MOVSDloadidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
-       // cond:
-       // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if x1.Aux != s {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVSDloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
-       // cond:
-       // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               if p != x1.Args[0] {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVSDloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
-       // cond:
-       // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if idx != x1.Args[1] {
+                       break
+               }
+               if mem != x1.Args[2] {
+                       break
+               }
+               s1 := o4.Args[1]
+               if s1.Op != OpAMD64SHLQconst {
+                       break
+               }
+               if s1.AuxInt != 16 {
+                       break
+               }
+               x2 := s1.Args[0]
+               if x2.Op != OpAMD64MOVBloadidx1 {
+                       break
+               }
+               if x2.AuxInt != i+2 {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVSDloadidx8)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
-       // cond:
-       // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               if x2.Aux != s {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVSDloadidx8)
-               v.AuxInt = c + 8*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if p != x2.Args[0] {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1 + off2)) {
+               if idx != x2.Args[1] {
                        break
                }
-               v.reset(OpAMD64MOVSDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if mem != x2.Args[2] {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               s2 := o3.Args[1]
+               if s2.Op != OpAMD64SHLQconst {
                        break
                }
-               v.reset(OpAMD64MOVSDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               if s2.AuxInt != 24 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               x3 := s2.Args[0]
+               if x3.Op != OpAMD64MOVBloadidx1 {
                        break
                }
-               v.reset(OpAMD64MOVSDstoreidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ8 {
+               if x3.AuxInt != i+3 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if x3.Aux != s {
                        break
                }
-               v.reset(OpAMD64MOVSDstoreidx8)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               if p != x3.Args[0] {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(ptr.Op != OpSB) {
+               if idx != x3.Args[1] {
                        break
                }
-               v.reset(OpAMD64MOVSDstoreidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-       // cond:
-       // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if mem != x3.Args[2] {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVSDstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-       // cond:
-       // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               s3 := o2.Args[1]
+               if s3.Op != OpAMD64SHLQconst {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVSDstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-       // cond:
-       // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if s3.AuxInt != 32 {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVSDstoreidx8)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-       // cond:
-       // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               x4 := s3.Args[0]
+               if x4.Op != OpAMD64MOVBloadidx1 {
+                       break
+               }
+               if x4.AuxInt != i+4 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVSDstoreidx8)
-               v.AuxInt = c + 8*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVSSload [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if x4.Aux != s {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1 + off2)) {
+               if p != x4.Args[0] {
                        break
                }
-               v.reset(OpAMD64MOVSSload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if idx != x4.Args[1] {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if mem != x4.Args[2] {
                        break
                }
-               v.reset(OpAMD64MOVSSload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               s4 := o1.Args[1]
+               if s4.Op != OpAMD64SHLQconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if s4.AuxInt != 40 {
                        break
                }
-               v.reset(OpAMD64MOVSSloadidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ4 {
+               x5 := s4.Args[0]
+               if x5.Op != OpAMD64MOVBloadidx1 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if x5.AuxInt != i+5 {
                        break
                }
-               v.reset(OpAMD64MOVSSloadidx4)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               if x5.Aux != s {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(ptr.Op != OpSB) {
+               if p != x5.Args[0] {
                        break
                }
-               v.reset(OpAMD64MOVSSloadidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
-       // cond:
-       // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if idx != x5.Args[1] {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVSSloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
-       // cond:
-       // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               if mem != x5.Args[2] {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVSSloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
-       // cond:
-       // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               s5 := o0.Args[1]
+               if s5.Op != OpAMD64SHLQconst {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVSSloadidx4)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
-       // cond:
-       // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               if s5.AuxInt != 48 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVSSloadidx4)
-               v.AuxInt = c + 4*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               x6 := s5.Args[0]
+               if x6.Op != OpAMD64MOVBloadidx1 {
+                       break
+               }
+               if x6.AuxInt != i+6 {
+                       break
+               }
+               if x6.Aux != s {
+                       break
+               }
+               if p != x6.Args[0] {
+                       break
+               }
+               if idx != x6.Args[1] {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1 + off2)) {
+               if mem != x6.Args[2] {
                        break
                }
-               v.reset(OpAMD64MOVSSstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               s6 := v.Args[1]
+               if s6.Op != OpAMD64SHLQconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if s6.AuxInt != 56 {
                        break
                }
-               v.reset(OpAMD64MOVSSstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               x7 := s6.Args[0]
+               if x7.Op != OpAMD64MOVBloadidx1 {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if x7.AuxInt != i+7 {
                        break
                }
-               v.reset(OpAMD64MOVSSstoreidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ4 {
+               if x7.Aux != s {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if p != x7.Args[0] {
                        break
                }
-               v.reset(OpAMD64MOVSSstoreidx4)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               if idx != x7.Args[1] {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(ptr.Op != OpSB) {
+               if mem != x7.Args[2] {
                        break
                }
-               v.reset(OpAMD64MOVSSstoreidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
+                       break
+               }
+               b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type)
+               v.reset(OpCopy)
+               v.AddArg(v0)
+               v0.AuxInt = i
+               v0.Aux = s
+               v0.AddArg(p)
+               v0.AddArg(idx)
+               v0.AddArg(mem)
                return true
        }
        return false
 }
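
The long match above is the generated matcher for a single load-combining rule: eight MOVBloadidx1 loads of consecutive bytes i..i+7, each shifted into position with SHLQconst and combined with ORQ, collapse into one MOVQloadidx1, provided every intermediate value has exactly one use (the Uses == 1 and clobber conditions) and all eight loads share a merge point. A minimal sketch of the kind of Go source this targets, assuming a little-endian layout; load64 is a hypothetical helper, not part of this CL:

package main

import "fmt"

// load64 combines eight adjacent byte loads with shifts and ORs.
// On little-endian AMD64 the rule above turns this whole expression
// into a single MOVQloadidx1.
func load64(b []byte, i int) uint64 {
        return uint64(b[i]) | uint64(b[i+1])<<8 | uint64(b[i+2])<<16 |
                uint64(b[i+3])<<24 | uint64(b[i+4])<<32 | uint64(b[i+5])<<40 |
                uint64(b[i+6])<<48 | uint64(b[i+7])<<56
}

func main() {
        b := []byte{1, 2, 3, 4, 5, 6, 7, 8}
        fmt.Printf("%#x\n", load64(b, 0)) // prints 0x807060504030201
}
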
-func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
+       // match: (ORQconst [0] x)
        // cond:
-       // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
+       // result: x
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v.AuxInt != 0 {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVSSstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
+       // match: (ORQconst [-1] _)
        // cond:
-       // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
+       // result: (MOVQconst [-1])
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               if v.AuxInt != -1 {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVSSstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = -1
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
+       // match: (ORQconst [c] (MOVQconst [d]))
        // cond:
-       // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
+       // result: (MOVQconst [c|d])
        for {
                c := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
                d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVSSstoreidx4)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-       // cond:
-       // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
-                       break
-               }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVSSstoreidx4)
-               v.AuxInt = c + 4*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = c | d
                return true
        }
        return false
 }
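
The three ORQconst rules are the usual OR identities: x|0 == x, x|-1 has all bits set, and OR of two constants folds at compile time. A plain-Go illustration of the same identities (not compiler code):

package main

import "fmt"

func main() {
        var x int64 = 0x1234
        fmt.Println(x|0 == x)   // (ORQconst [0] x)  -> x
        fmt.Println(x|-1 == -1) // (ORQconst [-1] _) -> (MOVQconst [-1])
        var d int64 = 0xf0
        fmt.Println(0x0f | d)   // constant-constant OR folds: prints 240|15 = 255
}
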
-func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
-       for {
-               x := v.Args[0]
-               if x.Op != OpAMD64MOVWload {
-                       break
-               }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               mem := x.Args[1]
-               if !(x.Uses == 1 && clobber(x)) {
-                       break
-               }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type)
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
-               v0.AddArg(mem)
-               return true
-       }
-       // match: (MOVWQSX (ANDLconst [c] x))
-       // cond: c & 0x8000 == 0
-       // result: (ANDLconst [c & 0x7fff] x)
+func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ROLBconst [c] (ROLBconst [d] x))
+       // cond:
+       // result: (ROLBconst [(c+d)&7] x)
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDLconst {
+               if v_0.Op != OpAMD64ROLBconst {
                        break
                }
-               c := v_0.AuxInt
+               d := v_0.AuxInt
                x := v_0.Args[0]
-               if !(c&0x8000 == 0) {
+               v.reset(OpAMD64ROLBconst)
+               v.AuxInt = (c + d) & 7
+               v.AddArg(x)
+               return true
+       }
+       // match: (ROLBconst [0] x)
+       // cond:
+       // result: x
+       for {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpAMD64ANDLconst)
-               v.AuxInt = c & 0x7fff
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
                return true
        }
        return false
 }
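
Two constant rotates compose by adding their counts modulo the operand width, hence the (c+d)&7 mask for the 8-bit ROLBconst; the ROLLconst, ROLQconst and ROLWconst functions below apply the same identity with masks 31, 63 and 15, and each family also strips a rotate by 0. A quick check of the identity for bytes, using math/bits purely as an illustration:

package main

import (
        "fmt"
        "math/bits"
)

func main() {
        x, c, d := uint8(0xB1), 3, 6
        // Rotating by c and then by d equals rotating once by (c+d) mod 8.
        lhs := bits.RotateLeft8(bits.RotateLeft8(x, c), d)
        rhs := bits.RotateLeft8(x, (c+d)&7) // (3+6)&7 == 1
        fmt.Println(lhs == rhs)             // true
}
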
-func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+       // match: (ROLLconst [c] (ROLLconst [d] x))
+       // cond:
+       // result: (ROLLconst [(c+d)&31] x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if v_0.Op != OpAMD64ROLLconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpAMD64ROLLconst)
+               v.AuxInt = (c + d) & 31
+               v.AddArg(x)
+               return true
+       }
+       // match: (ROLLconst [0] x)
+       // cond:
+       // result: x
+       for {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpAMD64MOVWQSXload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+       // match: (ROLQconst [c] (ROLQconst [d] x))
+       // cond:
+       // result: (ROLQconst [(c+d)&63] x)
        for {
-               x := v.Args[0]
-               if x.Op != OpAMD64MOVWload {
-                       break
-               }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               mem := x.Args[1]
-               if !(x.Uses == 1 && clobber(x)) {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ROLQconst {
                        break
                }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type)
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
-               v0.AddArg(mem)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpAMD64ROLQconst)
+               v.AuxInt = (c + d) & 63
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
+       // match: (ROLQconst [0] x)
+       // cond:
+       // result: x
        for {
-               x := v.Args[0]
-               if x.Op != OpAMD64MOVWloadidx1 {
-                       break
-               }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               idx := x.Args[1]
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
+               if v.AuxInt != 0 {
                        break
                }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
+               x := v.Args[0]
                v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
-               v0.AddArg(idx)
-               v0.AddArg(mem)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
-       // cond: x.Uses == 1 && clobber(x)
-       // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
+       return false
+}
+func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ROLWconst [c] (ROLWconst [d] x))
+       // cond:
+       // result: (ROLWconst [(c+d)&15] x)
        for {
-               x := v.Args[0]
-               if x.Op != OpAMD64MOVWloadidx2 {
-                       break
-               }
-               off := x.AuxInt
-               sym := x.Aux
-               ptr := x.Args[0]
-               idx := x.Args[1]
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64ROLWconst {
                        break
                }
-               b = x.Block
-               v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx2, v.Type)
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = off
-               v0.Aux = sym
-               v0.AddArg(ptr)
-               v0.AddArg(idx)
-               v0.AddArg(mem)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpAMD64ROLWconst)
+               v.AuxInt = (c + d) & 15
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWQZX (ANDLconst [c] x))
+       // match: (ROLWconst [0] x)
        // cond:
-       // result: (ANDLconst [c & 0xffff] x)
+       // result: x
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ANDLconst {
+               if v.AuxInt != 0 {
                        break
                }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpAMD64ANDLconst)
-               v.AuxInt = c & 0xffff
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-       // result: x
+       // match: (SARB x (MOVQconst [c]))
+       // cond:
+       // result: (SARBconst [c&31] x)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVWstore {
-                       break
-               }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               c := v_1.AuxInt
+               v.reset(OpAMD64SARBconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
                return true
        }
-       // match: (MOVWload  [off1] {sym} (ADDQconst [off2] ptr) mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVWload  [off1+off2] {sym} ptr mem)
+       // match: (SARB x (MOVLconst [c]))
+       // cond:
+       // result: (SARBconst [c&31] x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1 + off2)) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               v.reset(OpAMD64MOVWload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SARBconst)
+               v.AuxInt = c & 31
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
+       return false
+}
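
Note the mask on the count: even for a byte shift these rules use c&31, not c&7, because x86 masks the CL shift count modulo 32 for 8-, 16- and 32-bit operands (only REX.W shifts mask modulo 64). For a signed byte every count from 7 up to 31 behaves the same way, a sign fill, which Go's own shift semantics make easy to see:

package main

import "fmt"

func main() {
        var x int8 = -0x40
        // Arithmetic right shift of a signed byte by 7 or more just
        // replicates the sign bit, so any count in 7..31 is equivalent.
        fmt.Println(x>>7, x>>20) // -1 -1
}
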
+func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SARBconst [c] (MOVQconst [d]))
+       // cond:
+       // result: (MOVQconst [d>>uint64(c)])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
-               v.reset(OpAMD64MOVWload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = d >> uint64(c)
                return true
        }
-       // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       return false
+}
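
Folding a constant arithmetic shift is a one-liner because AuxInt is an int64 and Go's >> on signed integers sign-extends exactly like SAR; the SARLconst, SARQconst and SARWconst functions below fold the same way. For example:

package main

import "fmt"

func main() {
        var d int64 = -8
        c := uint64(2)
        fmt.Println(d >> c) // -2: >> on int64 is arithmetic, like SAR
}
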
+func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SARL x (MOVQconst [c]))
+       // cond:
+       // result: (SARLconst [c&31] x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               v.reset(OpAMD64SARLconst)
+               v.AuxInt = c & 31
+               v.AddArg(x)
+               return true
+       }
+       // match: (SARL x (MOVLconst [c]))
+       // cond:
+       // result: (SARLconst [c&31] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               v.reset(OpAMD64MOVWloadidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SARLconst)
+               v.AuxInt = c & 31
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (SARL x (ANDLconst [31] y))
+       // cond:
+       // result: (SARL x y)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ2 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ANDLconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v_1.AuxInt != 31 {
                        break
                }
-               v.reset(OpAMD64MOVWloadidx2)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               y := v_1.Args[0]
+               v.reset(OpAMD64SARL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVWloadidx1 [off] {sym} ptr idx mem)
+       return false
+}
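
The final SARL rule erases an explicit mask on the shift count: the SARL instruction already reduces its CL count modulo 32 in hardware, so ANDing the count with 31 beforehand cannot change the result. A hypothetical source-level sketch; whether real code reaches this exact SSA shape depends on the earlier lowering rules:

package main

import "fmt"

// sar32 is an illustrative helper, not compiler code. Masking the count
// to 0..31 makes the shift provably in range; the rule above then drops
// the AND because SARL masks its count modulo 32 anyway.
func sar32(x int32, y uint) int32 {
        return x >> (y & 31) // (SARL x (ANDLconst [31] y)) -> (SARL x y)
}

func main() {
        fmt.Println(sar32(-64, 34)) // count 34&31 == 2, prints -16
}
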
+func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SARLconst [c] (MOVQconst [d]))
+       // cond:
+       // result: (MOVQconst [d>>uint64(c)])
        for {
-               off := v.AuxInt
-               sym := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
-                       break
-               }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(ptr.Op != OpSB) {
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
-               v.reset(OpAMD64MOVWloadidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = d >> uint64(c)
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
+       // match: (SARQ x (MOVQconst [c]))
        // cond:
-       // result: (MOVWloadidx2 [c] {sym} ptr idx mem)
+       // result: (SARQconst [c&63] x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if v_1.AuxInt != 1 {
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVWloadidx2)
-               v.AuxInt = c
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SARQconst)
+               v.AuxInt = c & 63
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
+       // match: (SARQ x (MOVLconst [c]))
        // cond:
-       // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (SARQconst [c&63] x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVWloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SARQconst)
+               v.AuxInt = c & 63
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
+       // match: (SARQ x (ANDQconst [63] y))
        // cond:
-       // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
+       // result: (SARQ x y)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               if v_1.Op != OpAMD64ANDQconst {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVWloadidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               if v_1.AuxInt != 63 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpAMD64SARQ)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
        return false
 }
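
SARQ gets the matching rule with ANDQconst [63]: 64-bit shifts take a 6-bit count, so there the redundant mask is 63. Continuing the hypothetical sketch from above:

package main

import "fmt"

// sar64 is the 64-bit counterpart of sar32 (again an illustrative
// helper): the AND with 63 is redundant because a 64-bit SAR already
// masks its count modulo 64.
func sar64(x int64, y uint) int64 {
        return x >> (y & 63) // (SARQ x (ANDQconst [63] y)) -> (SARQ x y)
}

func main() {
        fmt.Println(sar64(-1024, 66)) // count 66&63 == 2, prints -256
}
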
-func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
+       // match: (SARQconst [c] (MOVQconst [d]))
        // cond:
-       // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
+       // result: (MOVQconst [d>>uint64(c)])
        for {
                c := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
                d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVWloadidx2)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
-       // cond:
-       // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
-       for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
-                       break
-               }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVWloadidx2)
-               v.AuxInt = c + 2*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = d >> uint64(c)
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
+       // match: (SARW x (MOVQconst [c]))
        // cond:
-       // result: (MOVWstore [off] {sym} ptr x mem)
+       // result: (SARWconst [c&31] x)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVWQSX {
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVWstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SARWconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
-               v.AddArg(mem)
                return true
        }
-       // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
+       // match: (SARW x (MOVLconst [c]))
        // cond:
-       // result: (MOVWstore [off] {sym} ptr x mem)
+       // result: (SARWconst [c&31] x)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVWQZX {
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVWstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SARWconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
-               v.AddArg(mem)
                return true
        }
-       // match: (MOVWstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
-       // cond: is32Bit(off1+off2)
-       // result: (MOVWstore  [off1+off2] {sym} ptr val mem)
+       return false
+}
+func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SARWconst [c] (MOVQconst [d]))
+       // cond:
+       // result: (MOVQconst [d>>uint64(c)])
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1 + off2)) {
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
-               v.reset(OpAMD64MOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = d >> uint64(c)
                return true
        }
-       // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
-       // cond: validOff(off)
-       // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
+       return false
+}
+func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SBBLcarrymask (FlagEQ))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               mem := v.Args[2]
-               if !(validOff(off)) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagEQ {
                        break
                }
-               v.reset(OpAMD64MOVWstoreconst)
-               v.AuxInt = makeValAndOff(int64(int16(c)), off)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+       // match: (SBBLcarrymask (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [-1])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               base := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v_0.Op != OpAMD64FlagLT_ULT {
                        break
                }
-               v.reset(OpAMD64MOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(base)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = -1
                return true
        }
-       // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       // match: (SBBLcarrymask (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v_0.Op != OpAMD64FlagLT_UGT {
                        break
                }
-               v.reset(OpAMD64MOVWstoreidx1)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
-       // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-       // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
+       // match: (SBBLcarrymask (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [-1])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ2 {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+               if v_0.Op != OpAMD64FlagGT_ULT {
                        break
                }
-               v.reset(OpAMD64MOVWstoreidx2)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = -1
                return true
        }
-       // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
-       // cond: ptr.Op != OpSB
-       // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
+       // match: (SBBLcarrymask (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               off := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
-                       break
-               }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(ptr.Op != OpSB) {
+               if v_0.Op != OpAMD64FlagGT_UGT {
                        break
                }
-               v.reset(OpAMD64MOVWstoreidx1)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVLstore [i-2] {s} p w mem)
+       return false
+}
+func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SBBQcarrymask (FlagEQ))
+       // cond:
+       // result: (MOVQconst [0])
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHRQconst {
-                       break
-               }
-               if v_1.AuxInt != 16 {
-                       break
-               }
-               w := v_1.Args[0]
-               x := v.Args[2]
-               if x.Op != OpAMD64MOVWstore {
-                       break
-               }
-               if x.AuxInt != i-2 {
-                       break
-               }
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               if w != x.Args[1] {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagEQ {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SBBQcarrymask (FlagLT_ULT))
+       // cond:
+       // result: (MOVQconst [-1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_ULT {
                        break
                }
-               v.reset(OpAMD64MOVLstore)
-               v.AuxInt = i - 2
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(w)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = -1
                return true
-       }
-       // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVLstore [i-2] {s} p w0 mem)
+       }
+       // match: (SBBQcarrymask (FlagLT_UGT))
+       // cond:
+       // result: (MOVQconst [0])
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHRQconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_UGT {
                        break
                }
-               j := v_1.AuxInt
-               w := v_1.Args[0]
-               x := v.Args[2]
-               if x.Op != OpAMD64MOVWstore {
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SBBQcarrymask (FlagGT_ULT))
+       // cond:
+       // result: (MOVQconst [-1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_ULT {
                        break
                }
-               if x.AuxInt != i-2 {
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = -1
+               return true
+       }
+       // match: (SBBQcarrymask (FlagGT_UGT))
+       // cond:
+       // result: (MOVQconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_UGT {
                        break
                }
-               if x.Aux != s {
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = 0
+               return true
+       }
+       return false
+}
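
SBBLcarrymask and SBBQcarrymask compute x - x - CF, that is, 0 when the carry flag is clear and -1 (all ones) when it is set. Once the flags are a known Flag* constant, the whole value folds: the unsigned-less-than states (FlagLT_ULT, FlagGT_ULT) have the borrow set and give -1, the others give 0, exactly as in the five rules of each function above. A sketch of that folding, assuming only the semantics stated in the match comments:

	// carryMask mirrors what SBB{L,Q}carrymask produces once the
	// carry flag is known at compile time.
	func carryMask(carry bool) int64 {
		if carry { // FlagLT_ULT, FlagGT_ULT: unsigned less-than, borrow set
			return -1
		}
		return 0 // FlagEQ, FlagLT_UGT, FlagGT_UGT: no borrow
	}
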
+func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SETA (InvertFlags x))
+       // cond:
+       // result: (SETB x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64InvertFlags {
                        break
                }
-               if p != x.Args[0] {
+               x := v_0.Args[0]
+               v.reset(OpAMD64SETB)
+               v.AddArg(x)
+               return true
+       }
+       // match: (SETA (FlagEQ))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagEQ {
                        break
                }
-               w0 := x.Args[1]
-               if w0.Op != OpAMD64SHRQconst {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETA (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_ULT {
                        break
                }
-               if w0.AuxInt != j-16 {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETA (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_UGT {
                        break
                }
-               if w != w0.Args[0] {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETA (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_ULT {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && clobber(x)) {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETA (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_UGT {
                        break
                }
-               v.reset(OpAMD64MOVLstore)
-               v.AuxInt = i - 2
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(w0)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
                return true
        }
        return false
 }
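
Every SETcc function in this file follows the same two rule families just seen for SETA: an InvertFlags argument (the flags of the same comparison with its operands swapped) turns the condition around, SETA into SETB and so on, while a known Flag* constant folds the whole SETcc to MOVLconst 0 or 1. In .rules notation (reproduced from the match/result comments above purely for illustration):

	(SETA (InvertFlags x)) -> (SETB x)
	(SETA (FlagEQ))        -> (MOVLconst [0])
	(SETA (FlagLT_UGT))    -> (MOVLconst [1])
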
-func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
-       // cond: ValAndOff(sc).canAdd(off)
-       // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
+       // match: (SETAE (InvertFlags x))
+       // cond:
+       // result: (SETBE x)
        for {
-               sc := v.AuxInt
-               s := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64InvertFlags {
                        break
                }
-               off := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(ValAndOff(sc).canAdd(off)) {
+               x := v_0.Args[0]
+               v.reset(OpAMD64SETBE)
+               v.AddArg(x)
+               return true
+       }
+       // match: (SETAE (FlagEQ))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagEQ {
                        break
                }
-               v.reset(OpAMD64MOVWstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = s
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-       // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+       // match: (SETAE (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               sc := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ {
+               if v_0.Op != OpAMD64FlagLT_ULT {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETAE (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_UGT {
                        break
                }
-               v.reset(OpAMD64MOVWstoreconst)
-               v.AuxInt = ValAndOff(sc).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
-       // cond: canMergeSym(sym1, sym2)
-       // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+       // match: (SETAE (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               x := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ1 {
+               if v_0.Op != OpAMD64FlagGT_ULT {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETAE (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_UGT {
                        break
                }
-               v.reset(OpAMD64MOVWstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
-       // cond: canMergeSym(sym1, sym2)
-       // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
+       return false
+}
+func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SETB (InvertFlags x))
+       // cond:
+       // result: (SETA x)
        for {
-               x := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64LEAQ2 {
+               if v_0.Op != OpAMD64InvertFlags {
                        break
                }
-               off := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               x := v_0.Args[0]
+               v.reset(OpAMD64SETA)
+               v.AddArg(x)
+               return true
+       }
+       // match: (SETB (FlagEQ))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagEQ {
                        break
                }
-               v.reset(OpAMD64MOVWstoreconstidx2)
-               v.AuxInt = ValAndOff(x).add(off)
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
+       // match: (SETB (FlagLT_ULT))
        // cond:
-       // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
+       // result: (MOVLconst [1])
        for {
-               x := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQ {
+               if v_0.Op != OpAMD64FlagLT_ULT {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               v.reset(OpAMD64MOVWstoreconstidx1)
-               v.AuxInt = x
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
+       // match: (SETB (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               x := v.Args[1]
-               if x.Op != OpAMD64MOVWstoreconst {
-                       break
-               }
-               a := x.AuxInt
-               if x.Aux != s {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_UGT {
                        break
                }
-               if p != x.Args[0] {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETB (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_ULT {
                        break
                }
-               mem := x.Args[1]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETB (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_UGT {
                        break
                }
-               v.reset(OpAMD64MOVLstoreconst)
-               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
+       // match: (SETBE (InvertFlags x))
        // cond:
-       // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
+       // result: (SETAE x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if v_1.AuxInt != 1 {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64InvertFlags {
                        break
                }
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVWstoreconstidx2)
-               v.AuxInt = c
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v_0.Args[0]
+               v.reset(OpAMD64SETAE)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
+       // match: (SETBE (FlagEQ))
        // cond:
-       // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // result: (MOVLconst [1])
        for {
-               x := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64FlagEQ {
                        break
                }
-               c := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVWstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
+       // match: (SETBE (FlagLT_ULT))
        // cond:
-       // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // result: (MOVLconst [1])
        for {
-               x := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_ULT {
                        break
                }
-               c := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVWstoreconstidx1)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
+       // match: (SETBE (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               i := v.Args[1]
-               x := v.Args[2]
-               if x.Op != OpAMD64MOVWstoreconstidx1 {
-                       break
-               }
-               a := x.AuxInt
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_UGT {
                        break
                }
-               if i != x.Args[1] {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETBE (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_ULT {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETBE (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_UGT {
                        break
                }
-               v.reset(OpAMD64MOVLstoreconstidx1)
-               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(i)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
+       // match: (SETEQ (InvertFlags x))
        // cond:
-       // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
+       // result: (SETEQ x)
        for {
-               x := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64InvertFlags {
                        break
                }
-               c := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVWstoreconstidx2)
-               v.AuxInt = ValAndOff(x).add(c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               x := v_0.Args[0]
+               v.reset(OpAMD64SETEQ)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
+       // match: (SETEQ (FlagEQ))
        // cond:
-       // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
+       // result: (MOVLconst [1])
        for {
-               x := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagEQ {
                        break
                }
-               c := v_1.AuxInt
-               idx := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpAMD64MOVWstoreconstidx2)
-               v.AuxInt = ValAndOff(x).add(2 * c)
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
-       // cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-       // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
+       // match: (SETEQ (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               c := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               i := v.Args[1]
-               x := v.Args[2]
-               if x.Op != OpAMD64MOVWstoreconstidx2 {
-                       break
-               }
-               a := x.AuxInt
-               if x.Aux != s {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_ULT {
                        break
                }
-               if p != x.Args[0] {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETEQ (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_UGT {
                        break
                }
-               if i != x.Args[1] {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETEQ (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_ULT {
                        break
                }
-               mem := x.Args[2]
-               if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETEQ (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_UGT {
                        break
                }
-               v.reset(OpAMD64MOVLstoreconstidx1)
-               v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
-               v.Aux = s
-               v.AddArg(p)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type)
-               v0.AuxInt = 1
-               v0.AddArg(i)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
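
SETEQ here, and SETNE further down, rewrite to themselves under InvertFlags: swapping the operands of a comparison flips the ordered conditions but leaves equality untouched. A one-line check of the symmetry being used (illustrative only):

	// Swapping comparison operands preserves ==, hence SETEQ(InvertFlags x) -> SETEQ(x).
	func eqSymmetric(a, b int64) bool { return (a == b) == (b == a) } // always true
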
-func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
+       // match: (SETG (InvertFlags x))
        // cond:
-       // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
+       // result: (SETL x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if v_1.AuxInt != 1 {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64InvertFlags {
                        break
                }
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVWstoreidx2)
-               v.AuxInt = c
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v_0.Args[0]
+               v.reset(OpAMD64SETL)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
+       // match: (SETG (FlagEQ))
        // cond:
-       // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+       // result: (MOVLconst [0])
        for {
-               c := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64FlagEQ {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVWstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
+       // match: (SETG (FlagLT_ULT))
        // cond:
-       // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
+       // result: (MOVLconst [0])
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_ULT {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVWstoreidx1)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
+       // match: (SETG (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpAMD64SHRQconst {
-                       break
-               }
-               if v_2.AuxInt != 16 {
-                       break
-               }
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != OpAMD64MOVWstoreidx1 {
-                       break
-               }
-               if x.AuxInt != i-2 {
-                       break
-               }
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               if idx != x.Args[1] {
-                       break
-               }
-               if w != x.Args[2] {
-                       break
-               }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_UGT {
                        break
                }
-               v.reset(OpAMD64MOVLstoreidx1)
-               v.AuxInt = i - 2
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(idx)
-               v.AddArg(w)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
+       // match: (SETG (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpAMD64SHRQconst {
-                       break
-               }
-               j := v_2.AuxInt
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != OpAMD64MOVWstoreidx1 {
-                       break
-               }
-               if x.AuxInt != i-2 {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_ULT {
                        break
                }
-               if x.Aux != s {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETG (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_UGT {
                        break
                }
-               if p != x.Args[0] {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       return false
+}
+func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SETGE (InvertFlags x))
+       // cond:
+       // result: (SETLE x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64InvertFlags {
                        break
                }
-               if idx != x.Args[1] {
+               x := v_0.Args[0]
+               v.reset(OpAMD64SETLE)
+               v.AddArg(x)
+               return true
+       }
+       // match: (SETGE (FlagEQ))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagEQ {
                        break
                }
-               w0 := x.Args[2]
-               if w0.Op != OpAMD64SHRQconst {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETGE (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_ULT {
                        break
                }
-               if w0.AuxInt != j-16 {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETGE (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_UGT {
                        break
                }
-               if w != w0.Args[0] {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETGE (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_ULT {
                        break
                }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETGE (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_UGT {
                        break
                }
-               v.reset(OpAMD64MOVLstoreidx1)
-               v.AuxInt = i - 2
-               v.Aux = s
-               v.AddArg(p)
-               v.AddArg(idx)
-               v.AddArg(w0)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
+       // match: (SETL (InvertFlags x))
        // cond:
-       // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
+       // result: (SETG x)
        for {
-               c := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ADDQconst {
+               if v_0.Op != OpAMD64InvertFlags {
                        break
                }
-               d := v_0.AuxInt
-               ptr := v_0.Args[0]
-               idx := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVWstoreidx2)
-               v.AuxInt = c + d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v_0.Args[0]
+               v.reset(OpAMD64SETG)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
+       // match: (SETL (FlagEQ))
        // cond:
-       // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
+       // result: (MOVLconst [0])
        for {
-               c := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ADDQconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagEQ {
                        break
                }
-               d := v_1.AuxInt
-               idx := v_1.Args[0]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpAMD64MOVWstoreidx2)
-               v.AuxInt = c + 2*d
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
+       // match: (SETL (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpAMD64SHRQconst {
-                       break
-               }
-               if v_2.AuxInt != 16 {
-                       break
-               }
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != OpAMD64MOVWstoreidx2 {
-                       break
-               }
-               if x.AuxInt != i-2 {
-                       break
-               }
-               if x.Aux != s {
-                       break
-               }
-               if p != x.Args[0] {
-                       break
-               }
-               if idx != x.Args[1] {
-                       break
-               }
-               if w != x.Args[2] {
-                       break
-               }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_ULT {
                        break
                }
-               v.reset(OpAMD64MOVLstoreidx1)
-               v.AuxInt = i - 2
-               v.Aux = s
-               v.AddArg(p)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
-               v0.AuxInt = 1
-               v0.AddArg(idx)
-               v.AddArg(v0)
-               v.AddArg(w)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
-       // cond: x.Uses == 1   && clobber(x)
-       // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
+       // match: (SETL (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
        for {
-               i := v.AuxInt
-               s := v.Aux
-               p := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpAMD64SHRQconst {
-                       break
-               }
-               j := v_2.AuxInt
-               w := v_2.Args[0]
-               x := v.Args[3]
-               if x.Op != OpAMD64MOVWstoreidx2 {
-                       break
-               }
-               if x.AuxInt != i-2 {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_UGT {
                        break
                }
-               if x.Aux != s {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETL (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_ULT {
                        break
                }
-               if p != x.Args[0] {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETL (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_UGT {
                        break
                }
-               if idx != x.Args[1] {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       return false
+}
+func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SETLE (InvertFlags x))
+       // cond:
+       // result: (SETGE x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64InvertFlags {
                        break
                }
-               w0 := x.Args[2]
-               if w0.Op != OpAMD64SHRQconst {
+               x := v_0.Args[0]
+               v.reset(OpAMD64SETGE)
+               v.AddArg(x)
+               return true
+       }
+       // match: (SETLE (FlagEQ))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagEQ {
                        break
                }
-               if w0.AuxInt != j-16 {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETLE (FlagLT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_ULT {
                        break
                }
-               if w != w0.Args[0] {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETLE (FlagLT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagLT_UGT {
                        break
                }
-               mem := x.Args[3]
-               if !(x.Uses == 1 && clobber(x)) {
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETLE (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_ULT {
                        break
                }
-               v.reset(OpAMD64MOVLstoreidx1)
-               v.AuxInt = i - 2
-               v.Aux = s
-               v.AddArg(p)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
-               v0.AuxInt = 1
-               v0.AddArg(idx)
-               v.AddArg(v0)
-               v.AddArg(w0)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (SETLE (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MULL x (MOVLconst [c]))
+       // match: (SETNE (InvertFlags x))
        // cond:
-       // result: (MULLconst [c] x)
+       // result: (SETNE x)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64InvertFlags {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpAMD64MULLconst)
-               v.AuxInt = c
+               x := v_0.Args[0]
+               v.reset(OpAMD64SETNE)
                v.AddArg(x)
                return true
        }
-       // match: (MULL (MOVLconst [c]) x)
+       // match: (SETNE (FlagEQ))
        // cond:
-       // result: (MULLconst [c] x)
+       // result: (MOVLconst [0])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               if v_0.Op != OpAMD64FlagEQ {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpAMD64MULLconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MULLconst [c] (MULLconst [d] x))
+       // match: (SETNE (FlagLT_ULT))
        // cond:
-       // result: (MULLconst [int64(int32(c * d))] x)
+       // result: (MOVLconst [1])
        for {
-               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MULLconst {
+               if v_0.Op != OpAMD64FlagLT_ULT {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpAMD64MULLconst)
-               v.AuxInt = int64(int32(c * d))
-               v.AddArg(x)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MULLconst [c] (MOVLconst [d]))
+       // match: (SETNE (FlagLT_UGT))
        // cond:
-       // result: (MOVLconst [int64(int32(c*d))])
+       // result: (MOVLconst [1])
        for {
-               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
+               if v_0.Op != OpAMD64FlagLT_UGT {
                        break
                }
-               d := v_0.AuxInt
                v.reset(OpAMD64MOVLconst)
-               v.AuxInt = int64(int32(c * d))
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETNE (FlagGT_ULT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_ULT {
+                       break
+               }
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (SETNE (FlagGT_UGT))
+       // cond:
+       // result: (MOVLconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64FlagGT_UGT {
+                       break
+               }
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 1
                return true
        }
        return false
 }
-func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MULQ x (MOVQconst [c]))
-       // cond: is32Bit(c)
-       // result: (MULQconst [c] x)
+       // match: (SHLL x (MOVQconst [c]))
+       // cond:
+       // result: (SHLLconst [c&31] x)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -11488,408 +11837,441 @@ func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool {
                        break
                }
                c := v_1.AuxInt
-               if !(is32Bit(c)) {
+               v.reset(OpAMD64SHLLconst)
+               v.AuxInt = c & 31
+               v.AddArg(x)
+               return true
+       }
+       // match: (SHLL x (MOVLconst [c]))
+       // cond:
+       // result: (SHLLconst [c&31] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               v.reset(OpAMD64MULQconst)
-               v.AuxInt = c
+               c := v_1.AuxInt
+               v.reset(OpAMD64SHLLconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
                return true
        }
-       // match: (MULQ (MOVQconst [c]) x)
-       // cond: is32Bit(c)
-       // result: (MULQconst [c] x)
+       // match: (SHLL x (ANDLconst [31] y))
+       // cond:
+       // result: (SHLL x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ANDLconst {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               if !(is32Bit(c)) {
+               if v_1.AuxInt != 31 {
                        break
                }
-               v.reset(OpAMD64MULQconst)
-               v.AuxInt = c
+               y := v_1.Args[0]
+               v.reset(OpAMD64SHLL)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
        return false
 }
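
Besides folding constant counts, SHLL drops an explicit ANDLconst [31] on a variable count, and SHLQ below likewise drops an ANDQconst [63]: the hardware already reduces the count mod 32 (mod 64 for 64-bit shifts), so masking twice is redundant. A small self-contained check of that identity, written against the mod-32 model of the instruction (assumed here, not stated in the CL):

	package main

	import "fmt"

	func main() {
		x := uint32(0xdeadbeef)
		for y := uint32(0); y < 256; y++ {
			// Count reduced mod 32 (what SHLL executes) versus the same
			// shift with an explicit AND in front: equal for every count.
			if x<<(y&31) != x<<((y&31)&31) {
				fmt.Println("mismatch at", y) // never reached
			}
		}
		fmt.Println("ok")
	}
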
-func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MULQconst [c] (MULQconst [d] x))
+       // match: (SHLQ x (MOVQconst [c]))
        // cond:
-       // result: (MULQconst [c * d] x)
+       // result: (SHLQconst [c&63] x)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MULQconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpAMD64MULQconst)
-               v.AuxInt = c * d
+               c := v_1.AuxInt
+               v.reset(OpAMD64SHLQconst)
+               v.AuxInt = c & 63
                v.AddArg(x)
                return true
        }
-       // match: (MULQconst [-1] x)
+       // match: (SHLQ x (MOVLconst [c]))
        // cond:
-       // result: (NEGQ x)
+       // result: (SHLQconst [c&63] x)
        for {
-               if v.AuxInt != -1 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpAMD64NEGQ)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SHLQconst)
+               v.AuxInt = c & 63
                v.AddArg(x)
                return true
        }
-       // match: (MULQconst [0] _)
+       // match: (SHLQ x (ANDQconst [63] y))
        // cond:
-       // result: (MOVQconst [0])
+       // result: (SHLQ x y)
        for {
-               if v.AuxInt != 0 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ANDQconst {
                        break
                }
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = 0
+               if v_1.AuxInt != 63 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpAMD64SHLQ)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MULQconst [1] x)
+       return false
+}
+func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SHRB x (MOVQconst [c]))
        // cond:
-       // result: x
+       // result: (SHRBconst [c&31] x)
        for {
-               if v.AuxInt != 1 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
+               c := v_1.AuxInt
+               v.reset(OpAMD64SHRBconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
                return true
        }
-       // match: (MULQconst [3] x)
+       // match: (SHRB x (MOVLconst [c]))
        // cond:
-       // result: (LEAQ2 x x)
+       // result: (SHRBconst [c&31] x)
        for {
-               if v.AuxInt != 3 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpAMD64LEAQ2)
-               v.AddArg(x)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SHRBconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
                return true
        }
-       // match: (MULQconst [5] x)
+       return false
+}
+func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SHRL x (MOVQconst [c]))
        // cond:
-       // result: (LEAQ4 x x)
+       // result: (SHRLconst [c&31] x)
        for {
-               if v.AuxInt != 5 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpAMD64LEAQ4)
-               v.AddArg(x)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SHRLconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
                return true
        }
-       // match: (MULQconst [7] x)
+       // match: (SHRL x (MOVLconst [c]))
        // cond:
-       // result: (LEAQ8 (NEGQ <v.Type> x) x)
+       // result: (SHRLconst [c&31] x)
        for {
-               if v.AuxInt != 7 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpAMD64LEAQ8)
-               v0 := b.NewValue0(v.Line, OpAMD64NEGQ, v.Type)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SHRLconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
                return true
        }
-       // match: (MULQconst [9] x)
+       // match: (SHRL x (ANDLconst [31] y))
        // cond:
-       // result: (LEAQ8 x x)
+       // result: (SHRL x y)
        for {
-               if v.AuxInt != 9 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ANDLconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpAMD64LEAQ8)
-               v.AddArg(x)
+               if v_1.AuxInt != 31 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpAMD64SHRL)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MULQconst [11] x)
+       return false
+}
+func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SHRQ x (MOVQconst [c]))
        // cond:
-       // result: (LEAQ2 x (LEAQ4 <v.Type> x x))
+       // result: (SHRQconst [c&63] x)
        for {
-               if v.AuxInt != 11 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpAMD64LEAQ2)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SHRQconst)
+               v.AuxInt = c & 63
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
                return true
        }
-       // match: (MULQconst [13] x)
+       // match: (SHRQ x (MOVLconst [c]))
        // cond:
-       // result: (LEAQ4 x (LEAQ2 <v.Type> x x))
+       // result: (SHRQconst [c&63] x)
        for {
-               if v.AuxInt != 13 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpAMD64LEAQ4)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SHRQconst)
+               v.AuxInt = c & 63
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
                return true
        }
-       // match: (MULQconst [21] x)
+       // match: (SHRQ x (ANDQconst [63] y))
        // cond:
-       // result: (LEAQ4 x (LEAQ4 <v.Type> x x))
+       // result: (SHRQ x y)
        for {
-               if v.AuxInt != 21 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64ANDQconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpAMD64LEAQ4)
+               if v_1.AuxInt != 63 {
+                       break
+               }
+               y := v_1.Args[0]
+               v.reset(OpAMD64SHRQ)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.AddArg(y)
                return true
        }
-       // match: (MULQconst [25] x)
+       return false
+}
+func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SHRW x (MOVQconst [c]))
        // cond:
-       // result: (LEAQ8 x (LEAQ2 <v.Type> x x))
+       // result: (SHRWconst [c&31] x)
        for {
-               if v.AuxInt != 25 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpAMD64LEAQ8)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SHRWconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
                return true
        }
-       // match: (MULQconst [37] x)
+       // match: (SHRW x (MOVLconst [c]))
        // cond:
-       // result: (LEAQ4 x (LEAQ8 <v.Type> x x))
+       // result: (SHRWconst [c&31] x)
        for {
-               if v.AuxInt != 37 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpAMD64LEAQ4)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SHRWconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
                return true
        }
-       // match: (MULQconst [41] x)
+       return false
+}
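
Note that the byte and word shifts mask their constant count with 31, not with 7 or 15: non-64-bit x86 shifts reduce the count mod 32 regardless of operand width. A sketch under that assumption (shrwModel is illustrative):

    func shrwModel(x uint16, count uint) uint16 {
            c := count & 31 // hardware reduces the count mod 32, even for 8/16-bit operands
            if c > 15 {
                    return 0 // every bit of the word has been shifted out
            }
            return x >> c
    }
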
+func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBL x (MOVLconst [c]))
        // cond:
-       // result: (LEAQ8 x (LEAQ4 <v.Type> x x))
+       // result: (SUBLconst x [c])
        for {
-               if v.AuxInt != 41 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpAMD64LEAQ8)
+               c := v_1.AuxInt
+               v.reset(OpAMD64SUBLconst)
+               v.AuxInt = c
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
                return true
        }
-       // match: (MULQconst [73] x)
+       // match: (SUBL (MOVLconst [c]) x)
        // cond:
-       // result: (LEAQ8 x (LEAQ8 <v.Type> x x))
+       // result: (NEGL (SUBLconst <v.Type> x [c]))
        for {
-               if v.AuxInt != 73 {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
                        break
                }
-               x := v.Args[0]
-               v.reset(OpAMD64LEAQ8)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
-               v0.AddArg(x)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpAMD64NEGL)
+               v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type)
+               v0.AuxInt = c
                v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
-       // match: (MULQconst [c] x)
-       // cond: isPowerOfTwo(c)
-       // result: (SHLQconst [log2(c)] x)
+       // match: (SUBL x x)
+       // cond:
+       // result: (MOVLconst [0])
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               if !(isPowerOfTwo(c)) {
+               if x != v.Args[1] {
                        break
                }
-               v.reset(OpAMD64SHLQconst)
-               v.AuxInt = log2(c)
-               v.AddArg(x)
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MULQconst [c] x)
-       // cond: isPowerOfTwo(c+1) && c >= 15
-       // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
+       return false
+}
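
The second SUBL rule relies on the identity c - x = -(x - c), which keeps the constant in the immediate field: emit SUBLconst x [c], then negate. A quick statement of the identity (the function name is illustrative):

    // c - x == -(x - c) in two's complement arithmetic, which is what
    // (SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst <v.Type> x [c])) uses.
    func subFromConstModel(c, x int32) int32 {
            return -(x - c)
    }
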
+func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBLconst [c] x)
+       // cond: int32(c) == 0
+       // result: x
        for {
                c := v.AuxInt
                x := v.Args[0]
-               if !(isPowerOfTwo(c+1) && c >= 15) {
+               if !(int32(c) == 0) {
                        break
                }
-               v.reset(OpAMD64SUBQ)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
-               v0.AuxInt = log2(c + 1)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
                return true
        }
-       // match: (MULQconst [c] x)
-       // cond: isPowerOfTwo(c-1) && c >= 17
-       // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
+       // match: (SUBLconst [c] x)
+       // cond:
+       // result: (ADDLconst [int64(int32(-c))] x)
        for {
                c := v.AuxInt
                x := v.Args[0]
-               if !(isPowerOfTwo(c-1) && c >= 17) {
-                       break
-               }
-               v.reset(OpAMD64LEAQ1)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
-               v0.AuxInt = log2(c - 1)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(OpAMD64ADDLconst)
+               v.AuxInt = int64(int32(-c))
                v.AddArg(x)
                return true
        }
-       // match: (MULQconst [c] x)
-       // cond: isPowerOfTwo(c-2) && c >= 34
-       // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
+}
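
Unlike the 64-bit case below, SUBLconst can always become an ADDLconst because 32-bit negation wraps: int64(int32(-c)) re-sign-extends the negated immediate, and even c = -1<<31 maps to itself, so adding and subtracting it agree mod 2^32. A sketch:

    // Subtracting c and adding -c agree mod 2^32, including the
    // c == math.MinInt32 case where -c wraps back to c.
    func subLconstModel(x, c int32) int32 {
            return x + (-c) // wraps exactly like x - c
    }
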
+func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBQ x (MOVQconst [c]))
+       // cond: is32Bit(c)
+       // result: (SUBQconst x [c])
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               if !(isPowerOfTwo(c-2) && c >= 34) {
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               v.reset(OpAMD64LEAQ2)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
-               v0.AuxInt = log2(c - 2)
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v.AddArg(x)
-               return true
-       }
-       // match: (MULQconst [c] x)
-       // cond: isPowerOfTwo(c-4) && c >= 68
-       // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
-       for {
-               c := v.AuxInt
-               x := v.Args[0]
-               if !(isPowerOfTwo(c-4) && c >= 68) {
+               c := v_1.AuxInt
+               if !(is32Bit(c)) {
                        break
                }
-               v.reset(OpAMD64LEAQ4)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
-               v0.AuxInt = log2(c - 4)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(OpAMD64SUBQconst)
+               v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (MULQconst [c] x)
-       // cond: isPowerOfTwo(c-8) && c >= 136
-       // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
+       // match: (SUBQ (MOVQconst [c]) x)
+       // cond: is32Bit(c)
+       // result: (NEGQ (SUBQconst <v.Type> x [c]))
        for {
-               c := v.AuxInt
-               x := v.Args[0]
-               if !(isPowerOfTwo(c-8) && c >= 136) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
-               v.reset(OpAMD64LEAQ8)
-               v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
-               v0.AuxInt = log2(c - 8)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpAMD64NEGQ)
+               v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type)
+               v0.AuxInt = c
                v0.AddArg(x)
                v.AddArg(v0)
-               v.AddArg(x)
                return true
        }
-       // match: (MULQconst [c] x)
-       // cond: c%3 == 0 && isPowerOfTwo(c/3)
-       // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
+       // match: (SUBQ x x)
+       // cond:
+       // result: (MOVQconst [0])
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+               if x != v.Args[1] {
                        break
                }
-               v.reset(OpAMD64SHLQconst)
-               v.AuxInt = log2(c / 3)
-               v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MULQconst [c] x)
-       // cond: c%5 == 0 && isPowerOfTwo(c/5)
-       // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
+       return false
+}
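
Both SUBQ rules carry an is32Bit(c) condition because x86-64 ALU instructions take at most a sign-extended 32-bit immediate; a 64-bit constant that does not fit must stay in a register. A sketch of that predicate, assuming it matches the helper used by these generated functions:

    // is32BitModel reports whether c is encodable as a sign-extended
    // 32-bit immediate, the limit for SUBQconst/ADDQconst operands.
    func is32BitModel(c int64) bool {
            return c == int64(int32(c))
    }
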
+func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBQconst [0] x)
+       // cond:
+       // result: x
        for {
-               c := v.AuxInt
-               x := v.Args[0]
-               if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpAMD64SHLQconst)
-               v.AuxInt = log2(c / 5)
-               v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (MULQconst [c] x)
-       // cond: c%9 == 0 && isPowerOfTwo(c/9)
-       // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
+       // match: (SUBQconst [c] x)
+       // cond: c != -(1<<31)
+       // result: (ADDQconst [-c] x)
        for {
                c := v.AuxInt
                x := v.Args[0]
-               if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+               if !(c != -(1 << 31)) {
                        break
                }
-               v.reset(OpAMD64SHLQconst)
-               v.AuxInt = log2(c / 9)
-               v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(OpAMD64ADDQconst)
+               v.AuxInt = -c
+               v.AddArg(x)
                return true
        }
-       // match: (MULQconst [c] (MOVQconst [d]))
+       // match: (SUBQconst (MOVQconst [d]) [c])
        // cond:
-       // result: (MOVQconst [c*d])
+       // result: (MOVQconst [d-c])
        for {
                c := v.AuxInt
                v_0 := v.Args[0]
@@ -11898,4578 +12280,4393 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
                }
                d := v_0.AuxInt
                v.reset(OpAMD64MOVQconst)
-               v.AuxInt = c * d
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod16  x y)
-       // cond:
-       // result: (Select1 (DIVW  x y))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect1)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.AuxInt = d - c
                return true
        }
-}
-func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod16u x y)
-       // cond:
-       // result: (Select1 (DIVWU x y))
+       // match: (SUBQconst (SUBQconst x [d]) [c])
+       // cond: is32Bit(-c-d)
+       // result: (ADDQconst [-c-d] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect1)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64SUBQconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(is32Bit(-c - d)) {
+                       break
+               }
+               v.reset(OpAMD64ADDQconst)
+               v.AuxInt = -c - d
+               v.AddArg(x)
                return true
        }
+       return false
 }
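
The c != -(1<<31) guard on the ADDQconst rewrite exists because negating the most negative 32-bit immediate produces 1<<31, which no longer fits the sign-extended 32-bit immediate field; in that one case the SUBQconst is kept. A sketch (negFitsImmediate is illustrative):

    // For a 32-bit immediate c, -c also fits the immediate field
    // if and only if c is not -1<<31.
    func negFitsImmediate(c int64) bool {
            return c != -(1 << 31)
    }
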
-func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod32  x y)
+       // match: (XORL x (MOVLconst [c]))
        // cond:
-       // result: (Select1 (DIVL  x y))
+       // result: (XORLconst [c] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect1)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpAMD64XORLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod32u x y)
+       // match: (XORL (MOVLconst [c]) x)
        // cond:
-       // result: (Select1 (DIVLU x y))
+       // result: (XORLconst [c] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect1)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpAMD64XORLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod64  x y)
+       // match: (XORL x x)
        // cond:
-       // result: (Select1 (DIVQ  x y))
+       // result: (MOVLconst [0])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect1)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
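
(XORL x x) rewrites to MOVLconst [0] rather than staying as the familiar xor-zeroing idiom: at this level the goal is a canonical constant that further rules can fold, and zero can be re-materialized as an XOR by later stages if that is profitable. The underlying identity:

    // x ^ x is 0 for every x; canonicalizing to a constant lets other
    // rewrites (constant folding, dead-code elimination) fire.
    func xorSelf(x uint32) uint32 {
            return x ^ x
    }
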
-func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod64u x y)
+       // match: (XORLconst [c] (XORLconst [d] x))
        // cond:
-       // result: (Select1 (DIVQU x y))
+       // result: (XORLconst [c ^ d] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect1)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64XORLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpAMD64XORLconst)
+               v.AuxInt = c ^ d
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod8   x y)
-       // cond:
-       // result: (Select1 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
+       // match: (XORLconst [c] x)
+       // cond: int32(c)==0
+       // result: x
        for {
+               c := v.AuxInt
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect1)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
-               v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if !(int32(c) == 0) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod8u  x y)
+       // match: (XORLconst [c] (MOVLconst [d]))
        // cond:
-       // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+       // result: (MOVLconst [c^d])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpSelect1)
-               v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
-               v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVLconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = c ^ d
                return true
        }
+       return false
 }
-func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Move [s] _ _ mem)
-       // cond: SizeAndAlign(s).Size() == 0
-       // result: mem
+       // match: (XORQ x (MOVQconst [c]))
+       // cond: is32Bit(c)
+       // result: (XORQconst [c] x)
        for {
-               s := v.AuxInt
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 0) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpAMD64MOVQconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = mem.Type
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpAMD64XORQconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 1
-       // result: (MOVBstore dst (MOVBload src mem) mem)
+       // match: (XORQ (MOVQconst [c]) x)
+       // cond: is32Bit(c)
+       // result: (XORQconst [c] x)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 1) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
-               v.reset(OpAMD64MOVBstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(is32Bit(c)) {
+                       break
+               }
+               v.reset(OpAMD64XORQconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 2
-       // result: (MOVWstore dst (MOVWload src mem) mem)
+       // match: (XORQ x x)
+       // cond:
+       // result: (MOVQconst [0])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 2) {
+               x := v.Args[0]
+               if x != v.Args[1] {
                        break
                }
-               v.reset(OpAMD64MOVWstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4
-       // result: (MOVLstore dst (MOVLload src mem) mem)
+       return false
+}
+func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (XORQconst [c] (XORQconst [d] x))
+       // cond:
+       // result: (XORQconst [c ^ d] x)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 4) {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64XORQconst {
                        break
                }
-               v.reset(OpAMD64MOVLstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpAMD64XORQconst)
+               v.AuxInt = c ^ d
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 8
-       // result: (MOVQstore dst (MOVQload src mem) mem)
+       // match: (XORQconst [0] x)
+       // cond:
+       // result: x
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 8) {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpAMD64MOVQstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 16
-       // result: (MOVOstore dst (MOVOload src mem) mem)
+       // match: (XORQconst [c] (MOVQconst [d]))
+       // cond:
+       // result: (MOVQconst [c^d])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 16) {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpAMD64MOVQconst {
                        break
                }
-               v.reset(OpAMD64MOVOstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = c ^ d
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 3
-       // result: (MOVBstore [2] dst (MOVBload [2] src mem)            (MOVWstore dst (MOVWload src mem) mem))
+       return false
+}
+func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add16  x y)
+       // cond:
+       // result: (ADDL  x y)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 3) {
-                       break
-               }
-               v.reset(OpAMD64MOVBstore)
-               v.AuxInt = 2
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
-               v0.AuxInt = 2
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ADDL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 5
-       // result: (MOVBstore [4] dst (MOVBload [4] src mem)            (MOVLstore dst (MOVLload src mem) mem))
+}
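
Add8 and Add16 (and Add32, next) all lower to the 32-bit ADDL: the machine add writes a full register, and the bits above the nominal width are junk that every consumer of a narrow value either ignores or re-extends. A sketch (add16Model is illustrative):

    func add16Model(x, y uint16) uint16 {
            wide := uint32(x) + uint32(y) // ADDL operates on the full 32-bit register
            return uint16(wide)           // consumers only read the low 16 bits
    }
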
+func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add32  x y)
+       // cond:
+       // result: (ADDL  x y)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 5) {
-                       break
-               }
-               v.reset(OpAMD64MOVBstore)
-               v.AuxInt = 4
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
-               v0.AuxInt = 4
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ADDL)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add32F x y)
+       // cond:
+       // result: (ADDSS x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ADDSS)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add64  x y)
+       // cond:
+       // result: (ADDQ  x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ADDQ)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 6
-       // result: (MOVWstore [4] dst (MOVWload [4] src mem)            (MOVLstore dst (MOVLload src mem) mem))
+}
+func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add64F x y)
+       // cond:
+       // result: (ADDSD x y)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 6) {
-                       break
-               }
-               v.reset(OpAMD64MOVWstore)
-               v.AuxInt = 4
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
-               v0.AuxInt = 4
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ADDSD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 7
-       // result: (MOVLstore [3] dst (MOVLload [3] src mem)            (MOVLstore dst (MOVLload src mem) mem))
+}
+func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add8   x y)
+       // cond:
+       // result: (ADDL  x y)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 7) {
-                       break
-               }
-               v.reset(OpAMD64MOVLstore)
-               v.AuxInt = 3
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-               v0.AuxInt = 3
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ADDL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16
-       // result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem)              (MOVQstore dst (MOVQload src mem) mem))
+}
+func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (AddPtr x y)
+       // cond:
+       // result: (ADDQ  x y)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) {
-                       break
-               }
-               v.reset(OpAMD64MOVQstore)
-               v.AuxInt = SizeAndAlign(s).Size() - 8
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
-               v0.AuxInt = SizeAndAlign(s).Size() - 8
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ADDQ)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8
-       // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]             (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16])          (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16])          (MOVQstore dst (MOVQload src mem) mem))
+}
+func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Addr {sym} base)
+       // cond:
+       // result: (LEAQ {sym} base)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) {
-                       break
-               }
-               v.reset(OpMove)
-               v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
-               v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
-               v0.AddArg(dst)
-               v0.AuxInt = SizeAndAlign(s).Size() % 16
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
-               v1.AddArg(src)
-               v1.AuxInt = SizeAndAlign(s).Size() % 16
-               v.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
-               v2.AddArg(dst)
-               v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
-               v3.AddArg(src)
-               v3.AddArg(mem)
-               v2.AddArg(v3)
-               v2.AddArg(mem)
-               v.AddArg(v2)
+               sym := v.Aux
+               base := v.Args[0]
+               v.reset(OpAMD64LEAQ)
+               v.Aux = sym
+               v.AddArg(base)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8
-       // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16]             (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16])          (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16])          (MOVOstore dst (MOVOload src mem) mem))
+}
+func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (And16 x y)
+       // cond:
+       // result: (ANDL x y)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) {
-                       break
-               }
-               v.reset(OpMove)
-               v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
-               v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
-               v0.AddArg(dst)
-               v0.AuxInt = SizeAndAlign(s).Size() % 16
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
-               v1.AddArg(src)
-               v1.AuxInt = SizeAndAlign(s).Size() % 16
-               v.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem)
-               v2.AddArg(dst)
-               v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
-               v3.AddArg(src)
-               v3.AddArg(mem)
-               v2.AddArg(v3)
-               v2.AddArg(mem)
-               v.AddArg(v2)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0      && !config.noDuffDevice
-       // result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
+}
+func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (And32 x y)
+       // cond:
+       // result: (ANDL x y)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
-                       break
-               }
-               v.reset(OpAMD64DUFFCOPY)
-               v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16)
-               v.AddArg(dst)
-               v.AddArg(src)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0
-       // result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)
+}
+func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (And64 x y)
+       // cond:
+       // result: (ANDQ x y)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) {
-                       break
-               }
-               v.reset(OpAMD64REPMOVSQ)
-               v.AddArg(dst)
-               v.AddArg(src)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-               v0.AuxInt = SizeAndAlign(s).Size() / 8
-               v.AddArg(v0)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDQ)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
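
The Move lowering (its rules appear on the removed side of this hunk, since the functions are being re-sorted) picks a copy strategy by size: exact small sizes get a single load/store pair, other small sizes split into two moves, an unaligned remainder is peeled off before recursing, and the bulk cases choose between Duff's device and REPMOVSQ. A condensed, approximate model of that dispatch, mirroring the conditions in the rules (moveStrategy is illustrative):

    func moveStrategy(size int64, noDuffDevice bool) string {
            switch {
            case size == 0:
                    return "no copy (mem passthrough)"
            case size <= 16 && size&(size-1) == 0: // 1, 2, 4, 8, 16
                    return "single load/store pair"
            case size < 16: // 3, 5, 6, 7, 9..15
                    return "two overlapping or adjacent moves"
            case size%16 != 0:
                    return "peel size%16 bytes, then Move the rest"
            case size <= 16*64 && !noDuffDevice: // multiples of 16, 32..1024
                    return "DUFFCOPY"
            default: // size%16 == 0 implies size%8 == 0
                    return "REPMOVSQ moving size/8 quadwords"
            }
    }
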
-func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul16  x y)
+       // match: (And8  x y)
        // cond:
-       // result: (MULL  x y)
+       // result: (ANDL x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64MULL)
+               v.reset(OpAMD64ANDL)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul32  x y)
+       // match: (AndB x y)
        // cond:
-       // result: (MULL  x y)
+       // result: (ANDL x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64MULL)
+               v.reset(OpAMD64ANDL)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul32F x y)
+       // match: (Avg64u x y)
        // cond:
-       // result: (MULSS x y)
+       // result: (AVGQU x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64MULSS)
+               v.reset(OpAMD64AVGQU)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul64  x y)
+       // match: (Bswap32 x)
        // cond:
-       // result: (MULQ  x y)
+       // result: (BSWAPL x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64MULQ)
+               v.reset(OpAMD64BSWAPL)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul64F x y)
+       // match: (Bswap64 x)
        // cond:
-       // result: (MULSD x y)
+       // result: (BSWAPQ x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64MULSD)
+               v.reset(OpAMD64BSWAPQ)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul8   x y)
+       // match: (ClosureCall [argwid] entry closure mem)
        // cond:
-       // result: (MULL  x y)
+       // result: (CALLclosure [argwid] entry closure mem)
+       for {
+               argwid := v.AuxInt
+               entry := v.Args[0]
+               closure := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpAMD64CALLclosure)
+               v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(closure)
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Com16 x)
+       // cond:
+       // result: (NOTL x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64MULL)
+               v.reset(OpAMD64NOTL)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NEGL (MOVLconst [c]))
+       // match: (Com32 x)
        // cond:
-       // result: (MOVLconst [int64(int32(-c))])
+       // result: (NOTL x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_0.AuxInt
+               x := v.Args[0]
+               v.reset(OpAMD64NOTL)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Com64 x)
+       // cond:
+       // result: (NOTQ x)
+       for {
+               x := v.Args[0]
+               v.reset(OpAMD64NOTQ)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Com8  x)
+       // cond:
+       // result: (NOTL x)
+       for {
+               x := v.Args[0]
+               v.reset(OpAMD64NOTL)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Const16  [val])
+       // cond:
+       // result: (MOVLconst [val])
+       for {
+               val := v.AuxInt
                v.reset(OpAMD64MOVLconst)
-               v.AuxInt = int64(int32(-c))
+               v.AuxInt = val
+               return true
+       }
+}
+func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Const32  [val])
+       // cond:
+       // result: (MOVLconst [val])
+       for {
+               val := v.AuxInt
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = val
+               return true
+       }
+}
+func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Const32F [val])
+       // cond:
+       // result: (MOVSSconst [val])
+       for {
+               val := v.AuxInt
+               v.reset(OpAMD64MOVSSconst)
+               v.AuxInt = val
+               return true
+       }
+}
+func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Const64  [val])
+       // cond:
+       // result: (MOVQconst [val])
+       for {
+               val := v.AuxInt
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = val
+               return true
+       }
+}
+func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Const64F [val])
+       // cond:
+       // result: (MOVSDconst [val])
+       for {
+               val := v.AuxInt
+               v.reset(OpAMD64MOVSDconst)
+               v.AuxInt = val
+               return true
+       }
+}
+func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Const8   [val])
+       // cond:
+       // result: (MOVLconst [val])
+       for {
+               val := v.AuxInt
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = val
+               return true
+       }
+}
+func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ConstBool [b])
+       // cond:
+       // result: (MOVLconst [b])
+       for {
+               b := v.AuxInt
+               v.reset(OpAMD64MOVLconst)
+               v.AuxInt = b
+               return true
+       }
+}
+func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ConstNil)
+       // cond:
+       // result: (MOVQconst [0])
+       for {
+               v.reset(OpAMD64MOVQconst)
+               v.AuxInt = 0
+               return true
+       }
+}
+func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Convert <t> x mem)
+       // cond:
+       // result: (MOVQconvert <t> x mem)
+       for {
+               t := v.Type
+               x := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpAMD64MOVQconvert)
+               v.Type = t
+               v.AddArg(x)
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueAMD64_OpCtz16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Ctz16 <t> x)
+       // cond:
+       // result: (CMOVWEQconst (BSFW <t> x) (CMPWconst x [0]) [16])
+       for {
+               t := v.Type
+               x := v.Args[0]
+               v.reset(OpAMD64CMOVWEQconst)
+               v.AuxInt = 16
+               v0 := b.NewValue0(v.Line, OpAMD64BSFW, t)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v1.AuxInt = 0
+               v1.AddArg(x)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NEGQ (MOVQconst [c]))
+       // match: (Ctz32 <t> x)
        // cond:
-       // result: (MOVQconst [-c])
+       // result: (CMOVLEQconst (BSFL <t> x) (CMPLconst x [0]) [32])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_0.AuxInt
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = -c
+               t := v.Type
+               x := v.Args[0]
+               v.reset(OpAMD64CMOVLEQconst)
+               v.AuxInt = 32
+               v0 := b.NewValue0(v.Line, OpAMD64BSFL, t)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v1.AuxInt = 0
+               v1.AddArg(x)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NOTL (MOVLconst [c]))
+       // match: (Ctz64 <t> x)
        // cond:
-       // result: (MOVLconst [^c])
+       // result: (CMOVQEQconst (BSFQ <t> x) (CMPQconst x [0]) [64])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = ^c
+               t := v.Type
+               x := v.Args[0]
+               v.reset(OpAMD64CMOVQEQconst)
+               v.AuxInt = 64
+               v0 := b.NewValue0(v.Line, OpAMD64BSFQ, t)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v1.AuxInt = 0
+               v1.AddArg(x)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NOTQ (MOVQconst [c]))
+       // match: (Cvt32Fto32 x)
        // cond:
-       // result: (MOVQconst [^c])
+       // result: (CVTTSS2SL x)
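+       // The CVTT (truncating) forms round toward zero, which matches
+       // Go's float-to-integer conversion semantics.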
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_0.AuxInt
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = ^c
+               x := v.Args[0]
+               v.reset(OpAMD64CVTTSS2SL)
+               v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg16  x)
+       // match: (Cvt32Fto64 x)
        // cond:
-       // result: (NEGL x)
+       // result: (CVTTSS2SQ x)
        for {
                x := v.Args[0]
-               v.reset(OpAMD64NEGL)
+               v.reset(OpAMD64CVTTSS2SQ)
                v.AddArg(x)
                return true
        }
 }
-func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg32  x)
+       // match: (Cvt32Fto64F x)
        // cond:
-       // result: (NEGL x)
+       // result: (CVTSS2SD x)
        for {
                x := v.Args[0]
-               v.reset(OpAMD64NEGL)
+               v.reset(OpAMD64CVTSS2SD)
                v.AddArg(x)
                return true
        }
 }
-func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg32F x)
+       // match: (Cvt32to32F x)
        // cond:
-       // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
+       // result: (CVTSL2SS x)
        for {
                x := v.Args[0]
-               v.reset(OpAMD64PXOR)
+               v.reset(OpAMD64CVTSL2SS)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
-               v0.AuxInt = f2i(math.Copysign(0, -1))
-               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg64  x)
+       // match: (Cvt32to64F x)
        // cond:
-       // result: (NEGQ x)
+       // result: (CVTSL2SD x)
        for {
                x := v.Args[0]
-               v.reset(OpAMD64NEGQ)
+               v.reset(OpAMD64CVTSL2SD)
                v.AddArg(x)
                return true
        }
 }
-func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg64F x)
+       // match: (Cvt64Fto32 x)
        // cond:
-       // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+       // result: (CVTTSD2SL x)
        for {
                x := v.Args[0]
-               v.reset(OpAMD64PXOR)
+               v.reset(OpAMD64CVTTSD2SL)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
-               v0.AuxInt = f2i(math.Copysign(0, -1))
-               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg8   x)
+       // match: (Cvt64Fto32F x)
        // cond:
-       // result: (NEGL x)
+       // result: (CVTSD2SS x)
        for {
                x := v.Args[0]
-               v.reset(OpAMD64NEGL)
+               v.reset(OpAMD64CVTSD2SS)
                v.AddArg(x)
                return true
        }
 }
-func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq16  x y)
+       // match: (Cvt64Fto64 x)
        // cond:
-       // result: (SETNE (CMPW x y))
+       // result: (CVTTSD2SQ x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETNE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpAMD64CVTTSD2SQ)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq32  x y)
+       // match: (Cvt64to32F x)
        // cond:
-       // result: (SETNE (CMPL x y))
+       // result: (CVTSQ2SS x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETNE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpAMD64CVTSQ2SS)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq32F x y)
+       // match: (Cvt64to64F x)
        // cond:
-       // result: (SETNEF (UCOMISS x y))
+       // result: (CVTSQ2SD x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETNEF)
-               v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpAMD64CVTSQ2SD)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq64  x y)
+       // match: (DeferCall [argwid] mem)
        // cond:
-       // result: (SETNE (CMPQ x y))
+       // result: (CALLdefer [argwid] mem)
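+       // AuxInt propagates the width in bytes of the call's outgoing
+       // argument area to the lowered call op.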
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SETNE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               argwid := v.AuxInt
+               mem := v.Args[0]
+               v.reset(OpAMD64CALLdefer)
+               v.AuxInt = argwid
+               v.AddArg(mem)
                return true
        }
 }
-func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq64F x y)
+       // match: (Div16  x y)
        // cond:
-       // result: (SETNEF (UCOMISD x y))
+       // result: (Select0 (DIVW  x y))
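+       // DIVW produces a (quotient, remainder) tuple; the Div rules take
+       // element 0 (the quotient) and the Mod rules take element 1.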
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SETNEF)
-               v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq8   x y)
+       // match: (Div16u x y)
        // cond:
-       // result: (SETNE (CMPB x y))
+       // result: (Select0 (DIVWU x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SETNE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NeqB   x y)
+       // match: (Div32  x y)
        // cond:
-       // result: (SETNE (CMPB x y))
+       // result: (Select0 (DIVL  x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SETNE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NeqPtr x y)
+       // match: (Div32F x y)
        // cond:
-       // result: (SETNE (CMPQ x y))
+       // result: (DIVSS x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SETNE)
-               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpAMD64DIVSS)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NilCheck ptr mem)
+       // match: (Div32u x y)
        // cond:
-       // result: (LoweredNilCheck ptr mem)
+       // result: (Select0 (DIVLU x y))
        for {
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpAMD64LoweredNilCheck)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Not x)
+       // match: (Div64  x y)
        // cond:
-       // result: (XORLconst [1] x)
+       // result: (Select0 (DIVQ  x y))
        for {
                x := v.Args[0]
-               v.reset(OpAMD64XORLconst)
-               v.AuxInt = 1
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ORL x (MOVLconst [c]))
+       // match: (Div64F x y)
        // cond:
-       // result: (ORLconst [c] x)
+       // result: (DIVSD x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64ORLconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               return true
-       }
-       // match: (ORL (MOVLconst [c]) x)
-       // cond:
-       // result: (ORLconst [c] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpAMD64ORLconst)
-               v.AuxInt = c
+               y := v.Args[1]
+               v.reset(OpAMD64DIVSD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (ORL x x)
+}
+func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div64u x y)
        // cond:
-       // result: x
+       // result: (Select0 (DIVQU x y))
        for {
                x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (ORL                  x0:(MOVBload [i]   {s} p mem)     s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
-       // cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
-       // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
-       for {
-               x0 := v.Args[0]
-               if x0.Op != OpAMD64MOVBload {
-                       break
-               }
-               i := x0.AuxInt
-               s := x0.Aux
-               p := x0.Args[0]
-               mem := x0.Args[1]
-               s0 := v.Args[1]
-               if s0.Op != OpAMD64SHLLconst {
-                       break
-               }
-               if s0.AuxInt != 8 {
-                       break
-               }
-               x1 := s0.Args[0]
-               if x1.Op != OpAMD64MOVBload {
-                       break
-               }
-               if x1.AuxInt != i+1 {
-                       break
-               }
-               if x1.Aux != s {
-                       break
-               }
-               if p != x1.Args[0] {
-                       break
-               }
-               if mem != x1.Args[1] {
-                       break
-               }
-               if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
-                       break
-               }
-               b = mergePoint(b, x0, x1)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
-               v.reset(OpCopy)
-               v.AddArg(v0)
-               v0.AuxInt = i
-               v0.Aux = s
-               v0.AddArg(p)
-               v0.AddArg(mem)
-               return true
-       }
-       // match: (ORL o0:(ORL o1:(ORL                        x0:(MOVBload [i]   {s} p mem)     s0:(SHLLconst [8]  x1:(MOVBload [i+1] {s} p mem)))     s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem)))     s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem)))
-       // cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && mergePoint(b,x0,x1,x2,x3) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(o0)   && clobber(o1)
-       // result: @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem)
-       for {
-               o0 := v.Args[0]
-               if o0.Op != OpAMD64ORL {
-                       break
-               }
-               o1 := o0.Args[0]
-               if o1.Op != OpAMD64ORL {
-                       break
-               }
-               x0 := o1.Args[0]
-               if x0.Op != OpAMD64MOVBload {
-                       break
-               }
-               i := x0.AuxInt
-               s := x0.Aux
-               p := x0.Args[0]
-               mem := x0.Args[1]
-               s0 := o1.Args[1]
-               if s0.Op != OpAMD64SHLLconst {
-                       break
-               }
-               if s0.AuxInt != 8 {
-                       break
-               }
-               x1 := s0.Args[0]
-               if x1.Op != OpAMD64MOVBload {
-                       break
-               }
-               if x1.AuxInt != i+1 {
-                       break
-               }
-               if x1.Aux != s {
-                       break
-               }
-               if p != x1.Args[0] {
-                       break
-               }
-               if mem != x1.Args[1] {
-                       break
-               }
-               s1 := o0.Args[1]
-               if s1.Op != OpAMD64SHLLconst {
-                       break
-               }
-               if s1.AuxInt != 16 {
-                       break
-               }
-               x2 := s1.Args[0]
-               if x2.Op != OpAMD64MOVBload {
-                       break
-               }
-               if x2.AuxInt != i+2 {
-                       break
-               }
-               if x2.Aux != s {
-                       break
-               }
-               if p != x2.Args[0] {
-                       break
-               }
-               if mem != x2.Args[1] {
-                       break
-               }
-               s2 := v.Args[1]
-               if s2.Op != OpAMD64SHLLconst {
-                       break
-               }
-               if s2.AuxInt != 24 {
-                       break
-               }
-               x3 := s2.Args[0]
-               if x3.Op != OpAMD64MOVBload {
-                       break
-               }
-               if x3.AuxInt != i+3 {
-                       break
-               }
-               if x3.Aux != s {
-                       break
-               }
-               if p != x3.Args[0] {
-                       break
-               }
-               if mem != x3.Args[1] {
-                       break
-               }
-               if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
-                       break
-               }
-               b = mergePoint(b, x0, x1, x2, x3)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-               v.reset(OpCopy)
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v0.AuxInt = i
-               v0.Aux = s
-               v0.AddArg(p)
-               v0.AddArg(mem)
                return true
        }
-       // match: (ORL                  x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
-       // cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
-       // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
+}
+func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div8   x y)
+       // cond:
+       // result: (Select0 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
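+       // There is no tuple-producing 8-bit divide in this lowering: the
+       // operands are sign-extended to 16 bits so DIVW can be reused.
+       // Div8u below does the same with zero extension and DIVWU.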
        for {
-               x0 := v.Args[0]
-               if x0.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               i := x0.AuxInt
-               s := x0.Aux
-               p := x0.Args[0]
-               idx := x0.Args[1]
-               mem := x0.Args[2]
-               s0 := v.Args[1]
-               if s0.Op != OpAMD64SHLLconst {
-                       break
-               }
-               if s0.AuxInt != 8 {
-                       break
-               }
-               x1 := s0.Args[0]
-               if x1.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               if x1.AuxInt != i+1 {
-                       break
-               }
-               if x1.Aux != s {
-                       break
-               }
-               if p != x1.Args[0] {
-                       break
-               }
-               if idx != x1.Args[1] {
-                       break
-               }
-               if mem != x1.Args[2] {
-                       break
-               }
-               if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
-                       break
-               }
-               b = mergePoint(b, x0, x1)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
-               v.reset(OpCopy)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+               v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v0.AuxInt = i
-               v0.Aux = s
-               v0.AddArg(p)
-               v0.AddArg(idx)
-               v0.AddArg(mem)
                return true
        }
-       // match: (ORL o0:(ORL o1:(ORL                        x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [8]  x1:(MOVBloadidx1 [i+1] {s} p idx mem)))     s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))     s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))
-       // cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && mergePoint(b,x0,x1,x2,x3) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(o0)   && clobber(o1)
-       // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
+}
+func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div8u  x y)
+       // cond:
+       // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
        for {
-               o0 := v.Args[0]
-               if o0.Op != OpAMD64ORL {
-                       break
-               }
-               o1 := o0.Args[0]
-               if o1.Op != OpAMD64ORL {
-                       break
-               }
-               x0 := o1.Args[0]
-               if x0.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               i := x0.AuxInt
-               s := x0.Aux
-               p := x0.Args[0]
-               idx := x0.Args[1]
-               mem := x0.Args[2]
-               s0 := o1.Args[1]
-               if s0.Op != OpAMD64SHLLconst {
-                       break
-               }
-               if s0.AuxInt != 8 {
-                       break
-               }
-               x1 := s0.Args[0]
-               if x1.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               if x1.AuxInt != i+1 {
-                       break
-               }
-               if x1.Aux != s {
-                       break
-               }
-               if p != x1.Args[0] {
-                       break
-               }
-               if idx != x1.Args[1] {
-                       break
-               }
-               if mem != x1.Args[2] {
-                       break
-               }
-               s1 := o0.Args[1]
-               if s1.Op != OpAMD64SHLLconst {
-                       break
-               }
-               if s1.AuxInt != 16 {
-                       break
-               }
-               x2 := s1.Args[0]
-               if x2.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               if x2.AuxInt != i+2 {
-                       break
-               }
-               if x2.Aux != s {
-                       break
-               }
-               if p != x2.Args[0] {
-                       break
-               }
-               if idx != x2.Args[1] {
-                       break
-               }
-               if mem != x2.Args[2] {
-                       break
-               }
-               s2 := v.Args[1]
-               if s2.Op != OpAMD64SHLLconst {
-                       break
-               }
-               if s2.AuxInt != 24 {
-                       break
-               }
-               x3 := s2.Args[0]
-               if x3.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               if x3.AuxInt != i+3 {
-                       break
-               }
-               if x3.Aux != s {
-                       break
-               }
-               if p != x3.Args[0] {
-                       break
-               }
-               if idx != x3.Args[1] {
-                       break
-               }
-               if mem != x3.Args[2] {
-                       break
-               }
-               if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
-                       break
-               }
-               b = mergePoint(b, x0, x1, x2, x3)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
-               v.reset(OpCopy)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect0)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+               v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v0.AuxInt = i
-               v0.Aux = s
-               v0.AddArg(p)
-               v0.AddArg(idx)
-               v0.AddArg(mem)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ORLconst [c] x)
-       // cond: int32(c)==0
-       // result: x
+       // match: (Eq16  x y)
+       // cond:
+       // result: (SETEQ (CMPW x y))
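+       // Comparisons lower to a flags-producing CMP feeding a SETcc that
+       // materializes the boolean result as a byte.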
        for {
-               c := v.AuxInt
                x := v.Args[0]
-               if !(int32(c) == 0) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64SETEQ)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (ORLconst [c] _)
-       // cond: int32(c)==-1
-       // result: (MOVLconst [-1])
+}
+func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq32  x y)
+       // cond:
+       // result: (SETEQ (CMPL x y))
        for {
-               c := v.AuxInt
-               if !(int32(c) == -1) {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = -1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETEQ)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (ORLconst [c] (MOVLconst [d]))
+}
+func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq32F x y)
        // cond:
-       // result: (MOVLconst [c|d])
+       // result: (SETEQF (UCOMISS x y))
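+       // SETEQF (rather than plain SETEQ) also consults the parity flag,
+       // which UCOMISS sets for unordered (NaN) operands.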
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = c | d
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETEQF)
+               v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ORQ x (MOVQconst [c]))
-       // cond: is32Bit(c)
-       // result: (ORQconst [c] x)
+       // match: (Eq64  x y)
+       // cond:
+       // result: (SETEQ (CMPQ x y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(is32Bit(c)) {
-                       break
-               }
-               v.reset(OpAMD64ORQconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64SETEQ)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (ORQ (MOVQconst [c]) x)
-       // cond: is32Bit(c)
-       // result: (ORQconst [c] x)
+}
+func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq64F x y)
+       // cond:
+       // result: (SETEQF (UCOMISD x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETEQF)
+               v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Eq8   x y)
+       // cond:
+       // result: (SETEQ (CMPB x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               if !(is32Bit(c)) {
-                       break
-               }
-               v.reset(OpAMD64ORQconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETEQ)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (ORQ x x)
+}
+func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (EqB   x y)
        // cond:
-       // result: x
+       // result: (SETEQ (CMPB x y))
        for {
                x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64SETEQ)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ                        x0:(MOVBload [i]   {s} p mem)     s0:(SHLQconst [8]  x1:(MOVBload [i+1] {s} p mem)))     s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem)))     s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem)))     s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem)))     s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem)))     s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem)))     s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem)))
-       // cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
-       // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem)
+}
+func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (EqPtr x y)
+       // cond:
+       // result: (SETEQ (CMPQ x y))
        for {
-               o0 := v.Args[0]
-               if o0.Op != OpAMD64ORQ {
-                       break
-               }
-               o1 := o0.Args[0]
-               if o1.Op != OpAMD64ORQ {
-                       break
-               }
-               o2 := o1.Args[0]
-               if o2.Op != OpAMD64ORQ {
-                       break
-               }
-               o3 := o2.Args[0]
-               if o3.Op != OpAMD64ORQ {
-                       break
-               }
-               o4 := o3.Args[0]
-               if o4.Op != OpAMD64ORQ {
-                       break
-               }
-               o5 := o4.Args[0]
-               if o5.Op != OpAMD64ORQ {
-                       break
-               }
-               x0 := o5.Args[0]
-               if x0.Op != OpAMD64MOVBload {
-                       break
-               }
-               i := x0.AuxInt
-               s := x0.Aux
-               p := x0.Args[0]
-               mem := x0.Args[1]
-               s0 := o5.Args[1]
-               if s0.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s0.AuxInt != 8 {
-                       break
-               }
-               x1 := s0.Args[0]
-               if x1.Op != OpAMD64MOVBload {
-                       break
-               }
-               if x1.AuxInt != i+1 {
-                       break
-               }
-               if x1.Aux != s {
-                       break
-               }
-               if p != x1.Args[0] {
-                       break
-               }
-               if mem != x1.Args[1] {
-                       break
-               }
-               s1 := o4.Args[1]
-               if s1.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s1.AuxInt != 16 {
-                       break
-               }
-               x2 := s1.Args[0]
-               if x2.Op != OpAMD64MOVBload {
-                       break
-               }
-               if x2.AuxInt != i+2 {
-                       break
-               }
-               if x2.Aux != s {
-                       break
-               }
-               if p != x2.Args[0] {
-                       break
-               }
-               if mem != x2.Args[1] {
-                       break
-               }
-               s2 := o3.Args[1]
-               if s2.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s2.AuxInt != 24 {
-                       break
-               }
-               x3 := s2.Args[0]
-               if x3.Op != OpAMD64MOVBload {
-                       break
-               }
-               if x3.AuxInt != i+3 {
-                       break
-               }
-               if x3.Aux != s {
-                       break
-               }
-               if p != x3.Args[0] {
-                       break
-               }
-               if mem != x3.Args[1] {
-                       break
-               }
-               s3 := o2.Args[1]
-               if s3.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s3.AuxInt != 32 {
-                       break
-               }
-               x4 := s3.Args[0]
-               if x4.Op != OpAMD64MOVBload {
-                       break
-               }
-               if x4.AuxInt != i+4 {
-                       break
-               }
-               if x4.Aux != s {
-                       break
-               }
-               if p != x4.Args[0] {
-                       break
-               }
-               if mem != x4.Args[1] {
-                       break
-               }
-               s4 := o1.Args[1]
-               if s4.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s4.AuxInt != 40 {
-                       break
-               }
-               x5 := s4.Args[0]
-               if x5.Op != OpAMD64MOVBload {
-                       break
-               }
-               if x5.AuxInt != i+5 {
-                       break
-               }
-               if x5.Aux != s {
-                       break
-               }
-               if p != x5.Args[0] {
-                       break
-               }
-               if mem != x5.Args[1] {
-                       break
-               }
-               s5 := o0.Args[1]
-               if s5.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s5.AuxInt != 48 {
-                       break
-               }
-               x6 := s5.Args[0]
-               if x6.Op != OpAMD64MOVBload {
-                       break
-               }
-               if x6.AuxInt != i+6 {
-                       break
-               }
-               if x6.Aux != s {
-                       break
-               }
-               if p != x6.Args[0] {
-                       break
-               }
-               if mem != x6.Args[1] {
-                       break
-               }
-               s6 := v.Args[1]
-               if s6.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s6.AuxInt != 56 {
-                       break
-               }
-               x7 := s6.Args[0]
-               if x7.Op != OpAMD64MOVBload {
-                       break
-               }
-               if x7.AuxInt != i+7 {
-                       break
-               }
-               if x7.Aux != s {
-                       break
-               }
-               if p != x7.Args[0] {
-                       break
-               }
-               if mem != x7.Args[1] {
-                       break
-               }
-               if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
-                       break
-               }
-               b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
-               v.reset(OpCopy)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETEQ)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v0.AuxInt = i
-               v0.Aux = s
-               v0.AddArg(p)
-               v0.AddArg(mem)
                return true
        }
-       // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ                        x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLQconst [8]  x1:(MOVBloadidx1 [i+1] {s} p idx mem)))     s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))     s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))     s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem)))     s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem)))     s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem)))     s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem)))
-       // cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
-       // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem)
+}
+func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq16  x y)
+       // cond:
+       // result: (SETGE (CMPW x y))
        for {
-               o0 := v.Args[0]
-               if o0.Op != OpAMD64ORQ {
-                       break
-               }
-               o1 := o0.Args[0]
-               if o1.Op != OpAMD64ORQ {
-                       break
-               }
-               o2 := o1.Args[0]
-               if o2.Op != OpAMD64ORQ {
-                       break
-               }
-               o3 := o2.Args[0]
-               if o3.Op != OpAMD64ORQ {
-                       break
-               }
-               o4 := o3.Args[0]
-               if o4.Op != OpAMD64ORQ {
-                       break
-               }
-               o5 := o4.Args[0]
-               if o5.Op != OpAMD64ORQ {
-                       break
-               }
-               x0 := o5.Args[0]
-               if x0.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               i := x0.AuxInt
-               s := x0.Aux
-               p := x0.Args[0]
-               idx := x0.Args[1]
-               mem := x0.Args[2]
-               s0 := o5.Args[1]
-               if s0.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s0.AuxInt != 8 {
-                       break
-               }
-               x1 := s0.Args[0]
-               if x1.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               if x1.AuxInt != i+1 {
-                       break
-               }
-               if x1.Aux != s {
-                       break
-               }
-               if p != x1.Args[0] {
-                       break
-               }
-               if idx != x1.Args[1] {
-                       break
-               }
-               if mem != x1.Args[2] {
-                       break
-               }
-               s1 := o4.Args[1]
-               if s1.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s1.AuxInt != 16 {
-                       break
-               }
-               x2 := s1.Args[0]
-               if x2.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               if x2.AuxInt != i+2 {
-                       break
-               }
-               if x2.Aux != s {
-                       break
-               }
-               if p != x2.Args[0] {
-                       break
-               }
-               if idx != x2.Args[1] {
-                       break
-               }
-               if mem != x2.Args[2] {
-                       break
-               }
-               s2 := o3.Args[1]
-               if s2.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s2.AuxInt != 24 {
-                       break
-               }
-               x3 := s2.Args[0]
-               if x3.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               if x3.AuxInt != i+3 {
-                       break
-               }
-               if x3.Aux != s {
-                       break
-               }
-               if p != x3.Args[0] {
-                       break
-               }
-               if idx != x3.Args[1] {
-                       break
-               }
-               if mem != x3.Args[2] {
-                       break
-               }
-               s3 := o2.Args[1]
-               if s3.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s3.AuxInt != 32 {
-                       break
-               }
-               x4 := s3.Args[0]
-               if x4.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               if x4.AuxInt != i+4 {
-                       break
-               }
-               if x4.Aux != s {
-                       break
-               }
-               if p != x4.Args[0] {
-                       break
-               }
-               if idx != x4.Args[1] {
-                       break
-               }
-               if mem != x4.Args[2] {
-                       break
-               }
-               s4 := o1.Args[1]
-               if s4.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s4.AuxInt != 40 {
-                       break
-               }
-               x5 := s4.Args[0]
-               if x5.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               if x5.AuxInt != i+5 {
-                       break
-               }
-               if x5.Aux != s {
-                       break
-               }
-               if p != x5.Args[0] {
-                       break
-               }
-               if idx != x5.Args[1] {
-                       break
-               }
-               if mem != x5.Args[2] {
-                       break
-               }
-               s5 := o0.Args[1]
-               if s5.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s5.AuxInt != 48 {
-                       break
-               }
-               x6 := s5.Args[0]
-               if x6.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               if x6.AuxInt != i+6 {
-                       break
-               }
-               if x6.Aux != s {
-                       break
-               }
-               if p != x6.Args[0] {
-                       break
-               }
-               if idx != x6.Args[1] {
-                       break
-               }
-               if mem != x6.Args[2] {
-                       break
-               }
-               s6 := v.Args[1]
-               if s6.Op != OpAMD64SHLQconst {
-                       break
-               }
-               if s6.AuxInt != 56 {
-                       break
-               }
-               x7 := s6.Args[0]
-               if x7.Op != OpAMD64MOVBloadidx1 {
-                       break
-               }
-               if x7.AuxInt != i+7 {
-                       break
-               }
-               if x7.Aux != s {
-                       break
-               }
-               if p != x7.Args[0] {
-                       break
-               }
-               if idx != x7.Args[1] {
-                       break
-               }
-               if mem != x7.Args[2] {
-                       break
-               }
-               if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
-                       break
-               }
-               b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type)
-               v.reset(OpCopy)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETGE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq16U x y)
+       // cond:
+       // result: (SETAE (CMPW x y))
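+       // Unsigned comparisons use the carry-based conditions (SETAE,
+       // SETA); the signed variants above use SETGE and SETG.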
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETAE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v0.AuxInt = i
-               v0.Aux = s
-               v0.AddArg(p)
-               v0.AddArg(idx)
-               v0.AddArg(mem)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ORQconst [0] x)
+       // match: (Geq32  x y)
        // cond:
-       // result: x
+       // result: (SETGE (CMPL x y))
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64SETGE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (ORQconst [-1] _)
+}
+func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq32F x y)
        // cond:
-       // result: (MOVQconst [-1])
+       // result: (SETGEF (UCOMISS x y))
        for {
-               if v.AuxInt != -1 {
-                       break
-               }
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = -1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETGEF)
+               v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (ORQconst [c] (MOVQconst [d]))
+}
+func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq32U x y)
        // cond:
-       // result: (MOVQconst [c|d])
+       // result: (SETAE (CMPL x y))
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = c | d
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETAE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (OffPtr [off] ptr)
-       // cond: is32Bit(off)
-       // result: (ADDQconst [off] ptr)
+       // match: (Geq64  x y)
+       // cond:
+       // result: (SETGE (CMPQ x y))
        for {
-               off := v.AuxInt
-               ptr := v.Args[0]
-               if !(is32Bit(off)) {
-                       break
-               }
-               v.reset(OpAMD64ADDQconst)
-               v.AuxInt = off
-               v.AddArg(ptr)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETGE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (OffPtr [off] ptr)
+}
+func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq64F x y)
        // cond:
-       // result: (ADDQ (MOVQconst [off]) ptr)
+       // result: (SETGEF (UCOMISD x y))
        for {
-               off := v.AuxInt
-               ptr := v.Args[0]
-               v.reset(OpAMD64ADDQ)
-               v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-               v0.AuxInt = off
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETGEF)
+               v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AddArg(ptr)
                return true
        }
 }
-func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or16 x y)
+       // match: (Geq64U x y)
        // cond:
-       // result: (ORL x y)
+       // result: (SETAE (CMPQ x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ORL)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64SETAE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or32 x y)
+       // match: (Geq8   x y)
        // cond:
-       // result: (ORL x y)
+       // result: (SETGE (CMPB x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ORL)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64SETGE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or64 x y)
+       // match: (Geq8U  x y)
        // cond:
-       // result: (ORQ x y)
+       // result: (SETAE (CMPB x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ORQ)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64SETAE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
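
Editor's note: the signed/unsigned split above is carried entirely by the condition code over the same CMP: signed Geq lowers to SETGE (SF == OF), unsigned Geq to SETAE ("above or equal", CF == 0). A minimal model of the two truth tables, with illustrative helper names rather than generated code:

	// geq8 corresponds to SETGE (CMPB x y); geq8u to SETAE (CMPB x y).
	func geq8(x, y int8) bool   { return x >= y } // signed: SF == OF
	func geq8u(x, y uint8) bool { return x >= y } // unsigned: CF == 0
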
+func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GetClosurePtr)
+       // cond:
+       // result: (LoweredGetClosurePtr)
+       for {
+               v.reset(OpAMD64LoweredGetClosurePtr)
+               return true
+       }
+}
+func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GetG mem)
+       // cond:
+       // result: (LoweredGetG mem)
+       for {
+               mem := v.Args[0]
+               v.reset(OpAMD64LoweredGetG)
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GoCall [argwid] mem)
+       // cond:
+       // result: (CALLgo [argwid] mem)
+       for {
+               argwid := v.AuxInt
+               mem := v.Args[0]
+               v.reset(OpAMD64CALLgo)
+               v.AuxInt = argwid
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater16  x y)
+       // cond:
+       // result: (SETG (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETG)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater16U x y)
+       // cond:
+       // result: (SETA (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETA)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater32  x y)
+       // cond:
+       // result: (SETG (CMPL x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETG)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater32F x y)
+       // cond:
+       // result: (SETGF (UCOMISS x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETGF)
+               v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or8  x y)
+       // match: (Greater32U x y)
        // cond:
-       // result: (ORL x y)
+       // result: (SETA (CMPL x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ORL)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64SETA)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (OrB x y)
+       // match: (Greater64  x y)
        // cond:
-       // result: (ORL x y)
+       // result: (SETG (CMPQ x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ORL)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64SETG)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ROLBconst [c] (ROLBconst [d] x))
-       // cond:
-       // result: (ROLBconst [(c+d)& 7] x)
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ROLBconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpAMD64ROLBconst)
-               v.AuxInt = (c + d) & 7
-               v.AddArg(x)
-               return true
-       }
-       // match: (ROLBconst [0] x)
+       // match: (Greater64F x y)
        // cond:
-       // result: x
+       // result: (SETGF (UCOMISD x y))
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64SETGF)
+               v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ROLLconst [c] (ROLLconst [d] x))
-       // cond:
-       // result: (ROLLconst [(c+d)&31] x)
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ROLLconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpAMD64ROLLconst)
-               v.AuxInt = (c + d) & 31
-               v.AddArg(x)
-               return true
-       }
-       // match: (ROLLconst [0] x)
+       // match: (Greater64U x y)
        // cond:
-       // result: x
+       // result: (SETA (CMPQ x y))
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64SETA)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ROLQconst [c] (ROLQconst [d] x))
-       // cond:
-       // result: (ROLQconst [(c+d)&63] x)
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ROLQconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpAMD64ROLQconst)
-               v.AuxInt = (c + d) & 63
-               v.AddArg(x)
-               return true
-       }
-       // match: (ROLQconst [0] x)
+       // match: (Greater8   x y)
        // cond:
-       // result: x
+       // result: (SETG (CMPB x y))
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64SETG)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ROLWconst [c] (ROLWconst [d] x))
-       // cond:
-       // result: (ROLWconst [(c+d)&15] x)
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64ROLWconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpAMD64ROLWconst)
-               v.AuxInt = (c + d) & 15
-               v.AddArg(x)
-               return true
-       }
-       // match: (ROLWconst [0] x)
+       // match: (Greater8U  x y)
        // cond:
-       // result: x
+       // result: (SETA (CMPB x y))
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64SETA)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux16 <t> x y)
+       // match: (Hmul16  x y)
        // cond:
-       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+       // result: (HMULW  x y)
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 16
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               v.reset(OpAMD64HMULW)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux32 <t> x y)
+       // match: (Hmul16u x y)
        // cond:
-       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+       // result: (HMULWU x y)
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 16
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               v.reset(OpAMD64HMULWU)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux64 <t> x y)
+       // match: (Hmul32  x y)
        // cond:
-       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
+       // result: (HMULL  x y)
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 16
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               v.reset(OpAMD64HMULL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux8  <t> x y)
+       // match: (Hmul32u x y)
        // cond:
-       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+       // result: (HMULLU x y)
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 16
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               v.reset(OpAMD64HMULLU)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x16 <t> x y)
+       // match: (Hmul64  x y)
        // cond:
-       // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+       // result: (HMULQ  x y)
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARW)
-               v.Type = t
+               v.reset(OpAMD64HMULQ)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-               v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 16
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
-               v.AddArg(v0)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x32 <t> x y)
+       // match: (Hmul64u x y)
        // cond:
-       // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+       // result: (HMULQU x y)
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARW)
-               v.Type = t
+               v.reset(OpAMD64HMULQU)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-               v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 16
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
-               v.AddArg(v0)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x64 <t> x y)
+       // match: (Hmul8   x y)
        // cond:
-       // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
+       // result: (HMULB  x y)
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARW)
-               v.Type = t
+               v.reset(OpAMD64HMULB)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
-               v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 16
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
-               v.AddArg(v0)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x8  <t> x y)
+       // match: (Hmul8u  x y)
        // cond:
-       // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+       // result: (HMULBU x y)
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARW)
-               v.Type = t
+               v.reset(OpAMD64HMULBU)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-               v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 16
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
-               v.AddArg(v0)
+               v.AddArg(y)
                return true
        }
 }
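
Editor's note: the Hmul ops return only the high half of the double-width product, which is what HMUL{B,W,L,Q}[U] compute. A sketch of the semantics in plain Go, using hypothetical helper names:

	func hmul16(x, y int16) int16    { return int16((int32(x) * int32(y)) >> 16) }
	func hmul16u(x, y uint16) uint16 { return uint16((uint32(x) * uint32(y)) >> 16) }
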
-func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux16 <t> x y)
+       // match: (InterCall [argwid] entry mem)
        // cond:
-       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+       // result: (CALLinter [argwid] entry mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               argwid := v.AuxInt
+               entry := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpAMD64CALLinter)
+               v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(mem)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux32 <t> x y)
+       // match: (IsInBounds idx len)
        // cond:
-       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+       // result: (SETB (CMPQ idx len))
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               idx := v.Args[0]
+               len := v.Args[1]
+               v.reset(OpAMD64SETB)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+               v0.AddArg(idx)
+               v0.AddArg(len)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
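
Editor's note: IsInBounds compiles to a single unsigned compare. Viewed as unsigned, a negative index becomes enormous, so one SETB (CMPQ idx len) covers both 0 <= idx and idx < len. A minimal model (illustrative helper):

	func isInBounds(idx, length int64) bool {
		return uint64(idx) < uint64(length) // SETB (CMPQ idx len)
	}
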
-func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux64 <t> x y)
+       // match: (IsNonNil p)
        // cond:
-       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+       // result: (SETNE (TESTQ p p))
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               p := v.Args[0]
+               v.reset(OpAMD64SETNE)
+               v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
+               v0.AddArg(p)
+               v0.AddArg(p)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
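
Editor's note: TESTQ p p ANDs p with itself and sets ZF, so SETNE reads back p != 0 without needing an immediate zero operand. In effect:

	func isNonNil(p uintptr) bool { return p&p != 0 } // p&p == p, so this is p != 0
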
-func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux8  <t> x y)
+       // match: (IsSliceInBounds idx len)
        // cond:
-       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+       // result: (SETBE (CMPQ idx len))
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               idx := v.Args[0]
+               len := v.Args[1]
+               v.reset(OpAMD64SETBE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+               v0.AddArg(idx)
+               v0.AddArg(len)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 32
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x16 <t> x y)
+       // match: (Leq16  x y)
        // cond:
-       // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+       // result: (SETLE (CMPW x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARL)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v.reset(OpAMD64SETLE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 32
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x32 <t> x y)
+       // match: (Leq16U x y)
        // cond:
-       // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+       // result: (SETBE (CMPW x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARL)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v.reset(OpAMD64SETBE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 32
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x64 <t> x y)
+       // match: (Leq32  x y)
        // cond:
-       // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
+       // result: (SETLE (CMPL x y))
        for {
-               t := v.Type
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpAMD64SARL)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
-               v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 32
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
+               y := v.Args[1]
+               v.reset(OpAMD64SETLE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x8  <t> x y)
+       // match: (Leq32F x y)
        // cond:
-       // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+       // result: (SETGEF (UCOMISS y x))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARL)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v.reset(OpAMD64SETGEF)
+               v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 32
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
 }
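
Editor's note: the float rules swap operands: Less32F x y becomes SETGF (UCOMISS y x), i.e. "y > x". For floats the swap preserves semantics, NaN included, since x < y and y > x are both false when either operand is NaN; presumably this keeps a single SETGF flag-decoding direction. A model (illustrative helper):

	func less32F(x, y float32) bool { return y > x } // same truth table as x < y
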
-func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64Ux16 <t> x y)
+       // match: (Leq32U x y)
        // cond:
-       // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+       // result: (SETBE (CMPL x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ANDQ)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+               v.reset(OpAMD64SETBE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 64
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64Ux32 <t> x y)
+       // match: (Leq64  x y)
        // cond:
-       // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+       // result: (SETLE (CMPQ x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ANDQ)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+               v.reset(OpAMD64SETLE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 64
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64Ux64 <t> x y)
+       // match: (Leq64F x y)
        // cond:
-       // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+       // result: (SETGEF (UCOMISD y x))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ANDQ)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
-               v0.AddArg(x)
+               v.reset(OpAMD64SETGEF)
+               v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
                v0.AddArg(y)
+               v0.AddArg(x)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 64
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64Ux8  <t> x y)
+       // match: (Leq64U x y)
        // cond:
-       // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+       // result: (SETBE (CMPQ x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ANDQ)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+               v.reset(OpAMD64SETBE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 64
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64x16 <t> x y)
+       // match: (Leq8   x y)
        // cond:
-       // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
+       // result: (SETLE (CMPB x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARQ)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v.reset(OpAMD64SETLE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 64
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64x32 <t> x y)
+       // match: (Leq8U  x y)
        // cond:
-       // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
+       // result: (SETBE (CMPB x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARQ)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v.reset(OpAMD64SETBE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 64
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64x64 <t> x y)
+       // match: (Less16  x y)
        // cond:
-       // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
+       // result: (SETL (CMPW x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARQ)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+               v.reset(OpAMD64SETL)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 64
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh64x8  <t> x y)
+       // match: (Less16U x y)
        // cond:
-       // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
+       // result: (SETB (CMPW x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARQ)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v.reset(OpAMD64SETB)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 64
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux16 <t> x y)
+       // match: (Less32  x y)
        // cond:
-       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+       // result: (SETL (CMPL x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+               v.reset(OpAMD64SETL)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 8
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux32 <t> x y)
+       // match: (Less32F x y)
        // cond:
-       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+       // result: (SETGF (UCOMISS y x))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
-               v0.AddArg(x)
+               v.reset(OpAMD64SETGF)
+               v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
                v0.AddArg(y)
+               v0.AddArg(x)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 8
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux64 <t> x y)
+       // match: (Less32U x y)
        // cond:
-       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
+       // result: (SETB (CMPL x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+               v.reset(OpAMD64SETB)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 8
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux8  <t> x y)
+       // match: (Less64  x y)
        // cond:
-       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+       // result: (SETL (CMPQ x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64ANDL)
-               v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+               v.reset(OpAMD64SETL)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-               v2.AddArg(y)
-               v2.AuxInt = 8
-               v1.AddArg(v2)
-               v.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x16 <t> x y)
+       // match: (Less64F x y)
        // cond:
-       // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+       // result: (SETGF (UCOMISD y x))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARB)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v.reset(OpAMD64SETGF)
+               v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 8
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x32 <t> x y)
+       // match: (Less64U x y)
        // cond:
-       // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+       // result: (SETB (CMPQ x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARB)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v.reset(OpAMD64SETB)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 8
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x64 <t> x y)
+       // match: (Less8   x y)
        // cond:
-       // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
+       // result: (SETL (CMPB x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARB)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+               v.reset(OpAMD64SETL)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 8
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x8  <t> x y)
+       // match: (Less8U  x y)
        // cond:
-       // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+       // result: (SETB (CMPB x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpAMD64SARB)
-               v.Type = t
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v.reset(OpAMD64SETB)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-               v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-               v3.AddArg(y)
-               v3.AuxInt = 8
-               v2.AddArg(v3)
-               v1.AddArg(v2)
-               v0.AddArg(v1)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SARB x (MOVQconst [c]))
-       // cond:
-       // result: (SARBconst [c&31] x)
+       // match: (Load <t> ptr mem)
+       // cond: (is64BitInt(t) || isPtr(t))
+       // result: (MOVQload ptr mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is64BitInt(t) || isPtr(t)) {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SARBconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               v.reset(OpAMD64MOVQload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (SARB x (MOVLconst [c]))
-       // cond:
-       // result: (SARBconst [c&31] x)
+       // match: (Load <t> ptr mem)
+       // cond: is32BitInt(t)
+       // result: (MOVLload ptr mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitInt(t)) {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SARBconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               v.reset(OpAMD64MOVLload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SARBconst [c] (MOVQconst [d]))
-       // cond:
-       // result: (MOVQconst [d>>uint64(c)])
+       // match: (Load <t> ptr mem)
+       // cond: is16BitInt(t)
+       // result: (MOVWload ptr mem)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is16BitInt(t)) {
                        break
                }
-               d := v_0.AuxInt
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = d >> uint64(c)
+               v.reset(OpAMD64MOVWload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SARL x (MOVQconst [c]))
-       // cond:
-       // result: (SARLconst [c&31] x)
+       // match: (Load <t> ptr mem)
+       // cond: (t.IsBoolean() || is8BitInt(t))
+       // result: (MOVBload ptr mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(t.IsBoolean() || is8BitInt(t)) {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SARLconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               v.reset(OpAMD64MOVBload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (SARL x (MOVLconst [c]))
-       // cond:
-       // result: (SARLconst [c&31] x)
+       // match: (Load <t> ptr mem)
+       // cond: is32BitFloat(t)
+       // result: (MOVSSload ptr mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitFloat(t)) {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SARLconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               v.reset(OpAMD64MOVSSload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (SARL x (ANDLconst [31] y))
-       // cond:
-       // result: (SARL x y)
+       // match: (Load <t> ptr mem)
+       // cond: is64BitFloat(t)
+       // result: (MOVSDload ptr mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ANDLconst {
-                       break
-               }
-               if v_1.AuxInt != 31 {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is64BitFloat(t)) {
                        break
                }
-               y := v_1.Args[0]
-               v.reset(OpAMD64SARL)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpAMD64MOVSDload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
        return false
 }
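
Editor's note: the Load conds dispatch purely on the type: 8-byte ints and pointers take MOVQload, then MOVLload/MOVWload/MOVBload by width, with MOVSSload/MOVSDload for floats. A condensed model of that decision, assuming a hypothetical simpleType rather than the compiler's Type:

	type simpleType struct {
		size    int64
		isFloat bool
	}

	func loadOp(t simpleType) string {
		switch {
		case t.isFloat && t.size == 8:
			return "MOVSDload"
		case t.isFloat && t.size == 4:
			return "MOVSSload"
		case t.size == 8:
			return "MOVQload" // 64-bit ints and pointers
		case t.size == 4:
			return "MOVLload"
		case t.size == 2:
			return "MOVWload"
		default:
			return "MOVBload" // booleans and 8-bit ints
		}
	}
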
-func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SARLconst [c] (MOVQconst [d]))
+       // match: (Lrot16 <t> x [c])
        // cond:
-       // result: (MOVQconst [d>>uint64(c)])
+       // result: (ROLWconst <t> [c&15] x)
        for {
+               t := v.Type
                c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = d >> uint64(c)
+               x := v.Args[0]
+               v.reset(OpAMD64ROLWconst)
+               v.Type = t
+               v.AuxInt = c & 15
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot32 <t> x [c])
+       // cond:
+       // result: (ROLLconst <t> [c&31] x)
+       for {
+               t := v.Type
+               c := v.AuxInt
+               x := v.Args[0]
+               v.reset(OpAMD64ROLLconst)
+               v.Type = t
+               v.AuxInt = c & 31
+               v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SARQ x (MOVQconst [c]))
+       // match: (Lrot64 <t> x [c])
        // cond:
-       // result: (SARQconst [c&63] x)
+       // result: (ROLQconst <t> [c&63] x)
        for {
+               t := v.Type
+               c := v.AuxInt
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SARQconst)
+               v.reset(OpAMD64ROLQconst)
+               v.Type = t
                v.AuxInt = c & 63
                v.AddArg(x)
                return true
        }
-       // match: (SARQ x (MOVLconst [c]))
+}
+func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot8  <t> x [c])
        // cond:
-       // result: (SARQconst [c&63] x)
+       // result: (ROLBconst <t> [c&7] x)
        for {
+               t := v.Type
+               c := v.AuxInt
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SARQconst)
-               v.AuxInt = c & 63
+               v.reset(OpAMD64ROLBconst)
+               v.Type = t
+               v.AuxInt = c & 7
                v.AddArg(x)
                return true
        }
-       // match: (SARQ x (ANDQconst [63] y))
+}
+func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x16 <t> x y)
        // cond:
-       // result: (SARQ x y)
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ANDQconst {
-                       break
-               }
-               if v_1.AuxInt != 63 {
-                       break
-               }
-               y := v_1.Args[0]
-               v.reset(OpAMD64SARQ)
-               v.AddArg(x)
-               v.AddArg(y)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
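+// The Lsh rules implement Go's shift semantics branch-free. x86 shift
+// instructions mask the count (to 5 bits for SHLL, 6 for SHLQ), so an
+// oversized Go shift, which must yield 0, needs an explicit fixup:
+// the CMPxconst sets the carry flag when the count is below the
+// instruction width, SBBLcarrymask/SBBQcarrymask turns that flag into
+// -1 or 0, and the AND applies it. Roughly:
+//
+//	mask := int32(0)
+//	if uint16(y) < 32 { // CMPWconst y [32]
+//		mask = -1 // SBBLcarrymask
+//	}
+//	return (x << y) & mask // SHLL + ANDL
+//
+// The 8- and 16-bit variants also compare against 32: any count of at
+// least the type width already shifts every interesting bit out of
+// the 32-bit register.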
-func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SARQconst [c] (MOVQconst [d]))
+       // match: (Lsh16x32 <t> x y)
        // cond:
-       // result: (MOVQconst [d>>uint64(c)])
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = d >> uint64(c)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SARW x (MOVQconst [c]))
+       // match: (Lsh16x64 <t> x y)
        // cond:
-       // result: (SARWconst [c&31] x)
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SARWconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SARW x (MOVLconst [c]))
+}
+func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x8  <t> x y)
        // cond:
-       // result: (SARWconst [c&31] x)
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SARWconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SARWconst [c] (MOVQconst [d]))
+       // match: (Lsh32x16 <t> x y)
        // cond:
-       // result: (MOVQconst [d>>uint64(c)])
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = d >> uint64(c)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SBBLcarrymask (FlagEQ))
+       // match: (Lsh32x32 <t> x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagEQ {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SBBLcarrymask (FlagLT_ULT))
+}
+func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x64 <t> x y)
        // cond:
-       // result: (MOVLconst [-1])
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = -1
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SBBLcarrymask (FlagLT_UGT))
+}
+func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x8  <t> x y)
        // cond:
-       // result: (MOVLconst [0])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SBBLcarrymask (FlagGT_ULT))
+}
+func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh64x16 <t> x y)
        // cond:
-       // result: (MOVLconst [-1])
+       // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = -1
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDQ)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SBBLcarrymask (FlagGT_UGT))
+}
+func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh64x32 <t> x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDQ)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SBBQcarrymask (FlagEQ))
+       // match: (Lsh64x64 <t> x y)
        // cond:
-       // result: (MOVQconst [0])
+       // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagEQ {
-                       break
-               }
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = 0
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDQ)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SBBQcarrymask (FlagLT_ULT))
+}
+func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh64x8  <t> x y)
        // cond:
-       // result: (MOVQconst [-1])
+       // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = -1
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDQ)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SBBQcarrymask (FlagLT_UGT))
+}
+func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x16 <t> x y)
        // cond:
-       // result: (MOVQconst [0])
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = 0
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SBBQcarrymask (FlagGT_ULT))
+}
+func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x32 <t> x y)
        // cond:
-       // result: (MOVQconst [-1])
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = -1
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SBBQcarrymask (FlagGT_UGT))
+}
+func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x64 <t> x y)
        // cond:
-       // result: (MOVQconst [0])
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = 0
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETA (InvertFlags x))
+       // match: (Lsh8x8  <t> x y)
        // cond:
-       // result: (SETB x)
+       // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(OpAMD64SETB)
-               v.AddArg(x)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETA (FlagEQ))
+}
+func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16  x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (Select1 (DIVW  x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagEQ {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETA (FlagLT_ULT))
+}
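+// The Mod rules use the tuple-typed divide ops: DIVW/DIVL/DIVQ and
+// their unsigned variants produce a (quotient, remainder) pair, typed
+// here with MakeTuple, and Select1 extracts the remainder. The Div
+// rules apply Select0 to the same ops for the quotient.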
+func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16u x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (Select1 (DIVWU x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETA (FlagLT_UGT))
+}
+func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32  x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (Select1 (DIVL  x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETA (FlagGT_ULT))
+}
+func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32u x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (Select1 (DIVLU x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETA (FlagGT_UGT))
+}
+func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod64  x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (Select1 (DIVQ  x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETAE (InvertFlags x))
+       // match: (Mod64u x y)
        // cond:
-       // result: (SETBE x)
+       // result: (Select1 (DIVQU x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(OpAMD64SETBE)
-               v.AddArg(x)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETAE (FlagEQ))
+}
+func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8   x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (Select1 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagEQ {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
+               v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETAE (FlagLT_ULT))
+}
+func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8u  x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpSelect1)
+               v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
+               v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETAE (FlagLT_UGT))
-       // cond:
-       // result: (MOVLconst [1])
+}
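+// There is no tuple-typed 8-bit divide, so Mod8 and Mod8u widen both
+// operands to 16 bits, with the extension matching the signedness,
+// and reuse DIVW/DIVWU; the low 8 bits of the 16-bit remainder are
+// the correct 8-bit result.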
+func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
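+       // Lowering dispatches on the constant size: power-of-two sizes
+       // up to 16 become a single load/store pair, other small sizes
+       // are decomposed into two pairs, 16-byte multiples up to 1024
+       // use Duff's device, and everything larger (or any 8-byte
+       // multiple with Duff's device disabled) falls back to REP MOVSQ.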
+       // match: (Move [s] _ _ mem)
+       // cond: SizeAndAlign(s).Size() == 0
+       // result: mem
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_UGT {
+               s := v.AuxInt
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 0) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               v.reset(OpCopy)
+               v.Type = mem.Type
+               v.AddArg(mem)
                return true
        }
-       // match: (SETAE (FlagGT_ULT))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 1
+       // result: (MOVBstore dst (MOVBload src mem) mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_ULT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 1) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               v.reset(OpAMD64MOVBstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
-       // match: (SETAE (FlagGT_UGT))
-       // cond:
-       // result: (MOVLconst [1])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2
+       // result: (MOVWstore dst (MOVWload src mem) mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_UGT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 2) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               v.reset(OpAMD64MOVWstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SETB (InvertFlags x))
-       // cond:
-       // result: (SETA x)
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4
+       // result: (MOVLstore dst (MOVLload src mem) mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64InvertFlags {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4) {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpAMD64SETA)
-               v.AddArg(x)
+               v.reset(OpAMD64MOVLstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
-       // match: (SETB (FlagEQ))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 8
+       // result: (MOVQstore dst (MOVQload src mem) mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagEQ {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 8) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               v.reset(OpAMD64MOVQstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
-       // match: (SETB (FlagLT_ULT))
-       // cond:
-       // result: (MOVLconst [1])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 16
+       // result: (MOVOstore dst (MOVOload src mem) mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_ULT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 16) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               v.reset(OpAMD64MOVOstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
-       // match: (SETB (FlagLT_UGT))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 3
+       // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_UGT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 3) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               v.reset(OpAMD64MOVBstore)
+               v.AuxInt = 2
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
+               v0.AuxInt = 2
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETB (FlagGT_ULT))
-       // cond:
-       // result: (MOVLconst [1])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 5
+       // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_ULT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 5) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               v.reset(OpAMD64MOVBstore)
+               v.AuxInt = 4
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
+               v0.AuxInt = 4
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETB (FlagGT_UGT))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 6
+       // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_UGT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 6) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               v.reset(OpAMD64MOVWstore)
+               v.AuxInt = 4
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
+               v0.AuxInt = 4
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
-       return false
-}
-func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SETBE (InvertFlags x))
-       // cond:
-       // result: (SETAE x)
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 7
+       // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64InvertFlags {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 7) {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpAMD64SETAE)
-               v.AddArg(x)
+               v.reset(OpAMD64MOVLstore)
+               v.AuxInt = 3
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+               v0.AuxInt = 3
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETBE (FlagEQ))
-       // cond:
-       // result: (MOVLconst [1])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16
+       // result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagEQ {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               v.reset(OpAMD64MOVQstore)
+               v.AuxInt = SizeAndAlign(s).Size() - 8
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+               v0.AuxInt = SizeAndAlign(s).Size() - 8
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
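+       // For sizes strictly between 8 and 16 the two quadword copies
+       // overlap in the middle; the overlapping bytes are written twice
+       // with identical data, which avoids a byte-granular tail.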
-       // match: (SETBE (FlagLT_ULT))
-       // cond:
-       // result: (MOVLconst [1])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8
+       // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16]) (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVQstore dst (MOVQload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_ULT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               v.reset(OpMove)
+               v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
+               v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
+               v0.AuxInt = SizeAndAlign(s).Size() % 16
+               v0.AddArg(dst)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
+               v1.AuxInt = SizeAndAlign(s).Size() % 16
+               v1.AddArg(src)
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
+               v2.AddArg(dst)
+               v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
+               v3.AddArg(src)
+               v3.AddArg(mem)
+               v2.AddArg(v3)
+               v2.AddArg(mem)
+               v.AddArg(v2)
                return true
        }
-       // match: (SETBE (FlagLT_UGT))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8
+       // result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] (ADDQconst <dst.Type> dst [SizeAndAlign(s).Size()%16]) (ADDQconst <src.Type> src [SizeAndAlign(s).Size()%16]) (MOVOstore dst (MOVOload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_UGT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               v.reset(OpMove)
+               v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
+               v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type)
+               v0.AuxInt = SizeAndAlign(s).Size() % 16
+               v0.AddArg(dst)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type)
+               v1.AuxInt = SizeAndAlign(s).Size() % 16
+               v1.AddArg(src)
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem)
+               v2.AddArg(dst)
+               v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
+               v3.AddArg(src)
+               v3.AddArg(mem)
+               v2.AddArg(v3)
+               v2.AddArg(mem)
+               v.AddArg(v2)
                return true
        }
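+       // For large sizes that are not a 16-byte multiple, one 8- or
+       // 16-byte copy covers the unaligned head (overlapping into the
+       // aligned region is harmless), both pointers advance by
+       // Size()%16, and the Move is re-emitted with a 16-byte-multiple
+       // size for the rules below to lower.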
-       // match: (SETBE (FlagGT_ULT))
-       // cond:
-       // result: (MOVLconst [1])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice
+       // result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_ULT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               v.reset(OpAMD64DUFFCOPY)
+               v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16)
+               v.AddArg(dst)
+               v.AddArg(src)
+               v.AddArg(mem)
                return true
        }
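+       // The runtime's duffcopy on amd64 is 64 unrolled 16-byte copy
+       // units of 14 code bytes each; AuxInt = 14*(64-Size/16) is the
+       // offset to jump to so that exactly Size/16 units execute
+       // before the RET.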
-       // match: (SETBE (FlagGT_UGT))
-       // cond:
-       // result: (MOVLconst [0])
+       // match: (Move [s] dst src mem)
+       // cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0
+       // result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_UGT {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               v.reset(OpAMD64REPMOVSQ)
+               v.AddArg(dst)
+               v.AddArg(src)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+               v0.AuxInt = SizeAndAlign(s).Size() / 8
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
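+       // Beyond the Duff's device range the copy becomes REP MOVSQ,
+       // with the quadword count Size()/8 materialized by MOVQconst.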
        return false
 }
-func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETEQ (InvertFlags x))
+       // match: (Mul16  x y)
        // cond:
-       // result: (SETEQ x)
+       // result: (MULL  x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(OpAMD64SETEQ)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64MULL)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETEQ (FlagEQ))
+}
+func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32  x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (MULL  x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagEQ {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64MULL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETEQ (FlagLT_ULT))
+}
+func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32F x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (MULSS x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64MULSS)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETEQ (FlagLT_UGT))
+}
+func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul64  x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (MULQ  x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64MULQ)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETEQ (FlagGT_ULT))
+}
+func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul64F x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (MULSD x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64MULSD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETEQ (FlagGT_UGT))
+}
+func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul8   x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (MULL  x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64MULL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
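+// Every integer multiply of 32 bits or fewer lowers to MULL: the low
+// 8, 16, or 32 bits of a 32-bit multiply are correct regardless of
+// signedness, so one instruction serves Mul8, Mul16, and Mul32.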
-func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETG (InvertFlags x))
+       // match: (Neg16  x)
        // cond:
-       // result: (SETL x)
+       // result: (NEGL x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(OpAMD64SETL)
+               x := v.Args[0]
+               v.reset(OpAMD64NEGL)
                v.AddArg(x)
                return true
        }
-       // match: (SETG (FlagEQ))
+}
+func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32  x)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (NEGL x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagEQ {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               v.reset(OpAMD64NEGL)
+               v.AddArg(x)
                return true
        }
-       // match: (SETG (FlagLT_ULT))
+}
+func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32F x)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               v.reset(OpAMD64PXOR)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
+               v0.AuxInt = f2i(math.Copysign(0, -1))
+               v.AddArg(v0)
                return true
        }
-       // match: (SETG (FlagLT_UGT))
+}
+func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg64  x)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (NEGQ x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               v.reset(OpAMD64NEGQ)
+               v.AddArg(x)
                return true
        }
-       // match: (SETG (FlagGT_ULT))
+}
+func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg64F x)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               v.reset(OpAMD64PXOR)
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
+               v0.AuxInt = f2i(math.Copysign(0, -1))
+               v.AddArg(v0)
                return true
        }
-       // match: (SETG (FlagGT_UGT))
+}
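+// Floating-point negation is a sign-bit flip: PXOR with the bit
+// pattern of -0.0 (f2i(math.Copysign(0, -1))) toggles the sign without
+// touching the significand, and unlike subtracting from zero it maps
+// +0.0 to -0.0 correctly.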
+func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg8   x)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (NEGL x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               v.reset(OpAMD64NEGL)
+               v.AddArg(x)
                return true
        }
-       return false
 }
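+// As with multiply, the narrow negations use the 32-bit NEGL; only
+// the low 8 or 16 bits of the result are observed.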
-func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETGE (InvertFlags x))
+       // match: (Neq16  x y)
        // cond:
-       // result: (SETLE x)
+       // result: (SETNE (CMPW x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(OpAMD64SETLE)
-               v.AddArg(x)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETNE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETGE (FlagEQ))
+}
+func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq32  x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (SETNE (CMPL x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagEQ {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETNE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETGE (FlagLT_ULT))
+}
+func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq32F x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (SETNEF (UCOMISS x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETNEF)
+               v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETGE (FlagLT_UGT))
+}
+func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq64  x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (SETNE (CMPQ x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETNE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETGE (FlagGT_ULT))
+}
+func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq64F x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (SETNEF (UCOMISD x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETNEF)
+               v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETGE (FlagGT_UGT))
+}
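+// Integer inequality is SETNE over the suitably sized CMP. The float
+// versions use SETNEF, which also reports true when the UCOMIS
+// comparison is unordered, so x != y holds when either operand is
+// NaN, as Go requires.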
+func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq8   x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (SETNE (CMPB x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETNE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETL (InvertFlags x))
+       // match: (NeqB   x y)
        // cond:
-       // result: (SETG x)
+       // result: (SETNE (CMPB x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(OpAMD64SETG)
-               v.AddArg(x)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETNE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETL (FlagEQ))
+}
+func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NeqPtr x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (SETNE (CMPQ x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagEQ {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SETNE)
+               v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETL (FlagLT_ULT))
+}
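
The Neq* rules above all share one shape: compare the operands at their natural width (CMPB/CMPW/CMPL/CMPQ), then materialize the boolean with a set-on-condition instruction. The float variants use SETNEF after UCOMISS/UCOMISD because a NaN comparison is "unordered" and must still report not-equal, which a plain SETNE on the zero flag would get wrong. A minimal sanity check of that semantic requirement in ordinary Go (illustrative only, not compiler code):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		nan := math.NaN()
		// (Neq64F x y) lowers to SETNEF(UCOMISD x y); the F variant
		// exists so the unordered (NaN) case still counts as not-equal.
		fmt.Println(nan != nan) // true
	}
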
+func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NilCheck ptr mem)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (LoweredNilCheck ptr mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpAMD64LoweredNilCheck)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (SETL (FlagLT_UGT))
+}
+func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Not x)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (XORLconst [1] x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
+               x := v.Args[0]
+               v.reset(OpAMD64XORLconst)
                v.AuxInt = 1
+               v.AddArg(x)
                return true
        }
-       // match: (SETL (FlagGT_ULT))
-       // cond:
-       // result: (MOVLconst [0])
+}
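
(Not x) -> (XORLconst [1] x) relies on the backend's invariant that booleans are materialized as 0 or 1, so flipping the low bit is a full logical negation. A quick check of that invariant in plain Go (a sketch under the stated 0/1 encoding assumption):

	package main

	import "fmt"

	// notViaXor is valid only under the 0/1 boolean encoding the SSA
	// backend guarantees; XOR with 1 maps 0->1 and 1->0.
	func notViaXor(b uint8) uint8 {
		return b ^ 1
	}

	func main() {
		fmt.Println(notViaXor(0), notViaXor(1)) // 1 0
	}
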
+func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OffPtr [off] ptr)
+       // cond: is32Bit(off)
+       // result: (ADDQconst [off] ptr)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_ULT {
+               off := v.AuxInt
+               ptr := v.Args[0]
+               if !(is32Bit(off)) {
                        break
                }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               v.reset(OpAMD64ADDQconst)
+               v.AuxInt = off
+               v.AddArg(ptr)
                return true
        }
-       // match: (SETL (FlagGT_UGT))
+       // match: (OffPtr [off] ptr)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (ADDQ (MOVQconst [off]) ptr)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               off := v.AuxInt
+               ptr := v.Args[0]
+               v.reset(OpAMD64ADDQ)
+               v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+               v0.AuxInt = off
+               v.AddArg(v0)
+               v.AddArg(ptr)
                return true
        }
-       return false
 }
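
The two OffPtr rules are ordered from cheap to general: when the offset fits in a sign-extended 32-bit immediate, a single ADDQconst suffices; otherwise the constant is first materialized into a register with MOVQconst and added with ADDQ. A sketch of the is32Bit guard as one would expect it to be defined (the actual helper lives elsewhere in the ssa package; this body is an assumption for illustration):

	// is32Bit reports whether n fits in a sign-extended 32-bit
	// immediate, the encodable range for ADDQconst-style
	// instructions on amd64.
	func is32Bit(n int64) bool {
		return n == int64(int32(n))
	}
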
-func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETLE (InvertFlags x))
+       // match: (Or16 x y)
        // cond:
-       // result: (SETGE x)
+       // result: (ORL x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(OpAMD64SETGE)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ORL)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETLE (FlagEQ))
+}
+func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or32 x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (ORL x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagEQ {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ORL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETLE (FlagLT_ULT))
+}
+func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or64 x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (ORQ x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ORQ)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETLE (FlagLT_UGT))
+}
+func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or8  x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (ORL x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ORL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETLE (FlagGT_ULT))
+}
+func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OrB x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (ORL x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ORL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SETLE (FlagGT_UGT))
+}
+func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux16 <t> x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v2.AuxInt = 16
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
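
The unsigned shift lowerings (Rsh*Ux*) all use the same masking trick. The hardware shift alone interprets its count modulo 32, but Go requires a shift by 16 or more of a 16-bit value to produce 0. SBBLcarrymask turns the unsigned comparison y < 16 into an all-ones or all-zeros mask, and the ANDL applies it. In plain Go the required semantics look like this (a sketch of the specification, not the generated code):

	// rsh16Ux mirrors what (Rsh16Ux16 x y) must compute: Go defines
	// oversized unsigned shifts to yield 0, so the raw hardware shift
	// is masked out whenever y >= 16.
	func rsh16Ux(x, y uint16) uint16 {
		shifted := x >> (y & 31) // SHRW uses only the low 5 bits of the count
		var mask uint16
		if y < 16 { // SBBLcarrymask: all ones iff CMPWconst y [16] sets carry
			mask = 0xffff
		}
		// For y >= 32 the hardware count wraps, so the mask, not the
		// shift, is what enforces the zero result.
		return shifted & mask // the ANDL
	}
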
-func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SETNE (InvertFlags x))
+       // match: (Rsh16Ux32 <t> x y)
        // cond:
-       // result: (SETNE x)
+       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64InvertFlags {
-                       break
-               }
-               x := v_0.Args[0]
-               v.reset(OpAMD64SETNE)
-               v.AddArg(x)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v2.AuxInt = 16
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETNE (FlagEQ))
+}
+func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux64 <t> x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagEQ {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v2.AuxInt = 16
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETNE (FlagLT_ULT))
+}
+func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16Ux8  <t> x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v2.AuxInt = 16
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SETNE (FlagLT_UGT))
+}
+func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x16 <t> x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagLT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SARW)
+               v.Type = t
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v3.AuxInt = 16
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETNE (FlagGT_ULT))
+}
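
Signed shifts (Rsh*x*) cannot simply be masked to zero: Go specifies that an arithmetic right shift by 16 or more fills the result with the sign bit. The ORL/NOTL/SBBLcarrymask cluster therefore saturates the count instead of masking the result: when y < 16 the ORed-in term is zero and y passes through unchanged, and when y >= 16 the count becomes all ones, so SARW shifts by 31 (mod 32) and replicates the sign bit. The equivalent behavior in plain Go (illustrative):

	// rsh16x mirrors the Rsh16x* lowering: an oversized count is
	// forced to all ones, and an arithmetic shift by 31 yields
	// 0 or -1, exactly Go's sign-fill semantics.
	func rsh16x(x int16, y uint16) int16 {
		if y >= 16 {
			y = 31 // NOTL(SBBLcarrymask(...)) ORed into y saturates the count
		}
		return x >> y // SARW
	}
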
+func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x32 <t> x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_ULT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SARW)
+               v.Type = t
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v3.AuxInt = 16
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SETNE (FlagGT_UGT))
+}
+func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x64 <t> x y)
        // cond:
-       // result: (MOVLconst [1])
+       // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64FlagGT_UGT {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 1
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SARW)
+               v.Type = t
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v3.AuxInt = 16
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SHLL x (MOVQconst [c]))
+       // match: (Rsh16x8  <t> x y)
        // cond:
-       // result: (SHLLconst [c&31] x)
+       // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SHLLconst)
-               v.AuxInt = c & 31
+               y := v.Args[1]
+               v.reset(OpAMD64SARW)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v3.AuxInt = 16
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SHLL x (MOVLconst [c]))
+}
+func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux16 <t> x y)
        // cond:
-       // result: (SHLLconst [c&31] x)
+       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SHLLconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SHLL x (ANDLconst [31] y))
+}
+func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux32 <t> x y)
        // cond:
-       // result: (SHLL x y)
+       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ANDLconst {
-                       break
-               }
-               if v_1.AuxInt != 31 {
-                       break
-               }
-               y := v_1.Args[0]
-               v.reset(OpAMD64SHLL)
-               v.AddArg(x)
-               v.AddArg(y)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SHLQ x (MOVQconst [c]))
+       // match: (Rsh32Ux64 <t> x y)
        // cond:
-       // result: (SHLQconst [c&63] x)
+       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SHLQconst)
-               v.AuxInt = c & 63
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SHLQ x (MOVLconst [c]))
+}
+func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32Ux8  <t> x y)
        // cond:
-       // result: (SHLQconst [c&63] x)
+       // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SHLQconst)
-               v.AuxInt = c & 63
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v2.AuxInt = 32
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SHLQ x (ANDQconst [63] y))
+}
+func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32x16 <t> x y)
        // cond:
-       // result: (SHLQ x y)
+       // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ANDQconst {
-                       break
-               }
-               if v_1.AuxInt != 63 {
-                       break
-               }
-               y := v_1.Args[0]
-               v.reset(OpAMD64SHLQ)
+               y := v.Args[1]
+               v.reset(OpAMD64SARL)
+               v.Type = t
                v.AddArg(x)
-               v.AddArg(y)
+               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v3.AuxInt = 32
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SHRB x (MOVQconst [c]))
+       // match: (Rsh32x32 <t> x y)
        // cond:
-       // result: (SHRBconst [c&31] x)
+       // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SHRBconst)
-               v.AuxInt = c & 31
+               y := v.Args[1]
+               v.reset(OpAMD64SARL)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v3.AuxInt = 32
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SHRB x (MOVLconst [c]))
+}
+func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh32x64 <t> x y)
        // cond:
-       // result: (SHRBconst [c&31] x)
+       // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SHRBconst)
-               v.AuxInt = c & 31
+               y := v.Args[1]
+               v.reset(OpAMD64SARL)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v3.AuxInt = 32
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SHRL x (MOVQconst [c]))
+       // match: (Rsh32x8  <t> x y)
        // cond:
-       // result: (SHRLconst [c&31] x)
+       // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SHRLconst)
-               v.AuxInt = c & 31
+               y := v.Args[1]
+               v.reset(OpAMD64SARL)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v3.AuxInt = 32
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SHRL x (MOVLconst [c]))
+}
+func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64Ux16 <t> x y)
        // cond:
-       // result: (SHRLconst [c&31] x)
+       // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SHRLconst)
-               v.AuxInt = c & 31
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDQ)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SHRL x (ANDLconst [31] y))
+}
+func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64Ux32 <t> x y)
        // cond:
-       // result: (SHRL x y)
+       // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ANDLconst {
-                       break
-               }
-               if v_1.AuxInt != 31 {
-                       break
-               }
-               y := v_1.Args[0]
-               v.reset(OpAMD64SHRL)
-               v.AddArg(x)
-               v.AddArg(y)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDQ)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SHRQ x (MOVQconst [c]))
+       // match: (Rsh64Ux64 <t> x y)
        // cond:
-       // result: (SHRQconst [c&63] x)
+       // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SHRQconst)
-               v.AuxInt = c & 63
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDQ)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SHRQ x (MOVLconst [c]))
+}
+func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64Ux8  <t> x y)
        // cond:
-       // result: (SHRQconst [c&63] x)
+       // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SHRQconst)
-               v.AuxInt = c & 63
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDQ)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SHRQ x (ANDQconst [63] y))
+}
+func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64x16 <t> x y)
        // cond:
-       // result: (SHRQ x y)
+       // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64ANDQconst {
-                       break
-               }
-               if v_1.AuxInt != 63 {
-                       break
-               }
-               y := v_1.Args[0]
-               v.reset(OpAMD64SHRQ)
+               y := v.Args[1]
+               v.reset(OpAMD64SARQ)
+               v.Type = t
                v.AddArg(x)
-               v.AddArg(y)
+               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v3.AuxInt = 64
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SHRW x (MOVQconst [c]))
+       // match: (Rsh64x32 <t> x y)
        // cond:
-       // result: (SHRWconst [c&31] x)
+       // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SHRWconst)
-               v.AuxInt = c & 31
+               y := v.Args[1]
+               v.reset(OpAMD64SARQ)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v3.AuxInt = 64
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SHRW x (MOVLconst [c]))
+}
+func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh64x64 <t> x y)
        // cond:
-       // result: (SHRWconst [c&31] x)
+       // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SHRWconst)
-               v.AuxInt = c & 31
+               y := v.Args[1]
+               v.reset(OpAMD64SARQ)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v3.AuxInt = 64
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBL x (MOVLconst [c]))
+       // match: (Rsh64x8  <t> x y)
        // cond:
-       // result: (SUBLconst x [c])
+       // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64SUBLconst)
+               y := v.Args[1]
+               v.reset(OpAMD64SARQ)
+               v.Type = t
                v.AddArg(x)
-               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v3.AuxInt = 64
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SUBL (MOVLconst [c]) x)
+}
+func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux16 <t> x y)
        // cond:
-       // result: (NEGL (SUBLconst <v.Type> x [c]))
+       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpAMD64NEGL)
-               v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
                v0.AddArg(x)
-               v0.AuxInt = c
+               v0.AddArg(y)
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v2.AuxInt = 8
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SUBL x x)
+}
+func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux32 <t> x y)
        // cond:
-       // result: (MOVLconst [0])
+       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
        for {
+               t := v.Type
                x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v2.AuxInt = 8
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBLconst [c] x)
-       // cond: int32(c) == 0
-       // result: x
-       for {
-               c := v.AuxInt
-               x := v.Args[0]
-               if !(int32(c) == 0) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (SUBLconst [c] x)
+       // match: (Rsh8Ux64 <t> x y)
        // cond:
-       // result: (ADDLconst [int64(int32(-c))] x)
+       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
        for {
-               c := v.AuxInt
+               t := v.Type
                x := v.Args[0]
-               v.reset(OpAMD64ADDLconst)
-               v.AuxInt = int64(int32(-c))
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v2.AuxInt = 8
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBQ x (MOVQconst [c]))
-       // cond: is32Bit(c)
-       // result: (SUBQconst x [c])
+       // match: (Rsh8Ux8  <t> x y)
+       // cond:
+       // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(is32Bit(c)) {
-                       break
-               }
-               v.reset(OpAMD64SUBQconst)
-               v.AddArg(x)
-               v.AuxInt = c
-               return true
-       }
-       // match: (SUBQ (MOVQconst [c]) x)
-       // cond: is32Bit(c)
-       // result: (NEGQ (SUBQconst <v.Type> x [c]))
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               if !(is32Bit(c)) {
-                       break
-               }
-               v.reset(OpAMD64NEGQ)
-               v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type)
+               y := v.Args[1]
+               v.reset(OpAMD64ANDL)
+               v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
                v0.AddArg(x)
-               v0.AuxInt = c
+               v0.AddArg(y)
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+               v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v2.AuxInt = 8
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
-       // match: (SUBQ x x)
+}
+func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x16 <t> x y)
        // cond:
-       // result: (MOVQconst [0])
+       // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
        for {
+               t := v.Type
                x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = 0
+               y := v.Args[1]
+               v.reset(OpAMD64SARB)
+               v.Type = t
+               v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+               v3.AuxInt = 8
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
+func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBQconst [0] x)
+       // match: (Rsh8x32 <t> x y)
        // cond:
-       // result: x
+       // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
+               t := v.Type
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
+               y := v.Args[1]
+               v.reset(OpAMD64SARB)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+               v3.AuxInt = 8
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SUBQconst [c] x)
-       // cond: c != -(1<<31)
-       // result: (ADDQconst [-c] x)
+}
+func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x64 <t> x y)
+       // cond:
+       // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
        for {
-               c := v.AuxInt
+               t := v.Type
                x := v.Args[0]
-               if !(c != -(1 << 31)) {
-                       break
-               }
-               v.reset(OpAMD64ADDQconst)
-               v.AuxInt = -c
+               y := v.Args[1]
+               v.reset(OpAMD64SARB)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+               v3.AuxInt = 8
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       // match: (SUBQconst (MOVQconst [d]) [c])
+}
+func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x8  <t> x y)
        // cond:
-       // result: (MOVQconst [d-c])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
-                       break
-               }
-               d := v_0.AuxInt
-               c := v.AuxInt
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = d - c
-               return true
-       }
-       // match: (SUBQconst (SUBQconst x [d]) [c])
-       // cond: is32Bit(-c-d)
-       // result: (ADDQconst [-c-d] x)
+       // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64SUBQconst {
-                       break
-               }
-               x := v_0.Args[0]
-               d := v_0.AuxInt
-               c := v.AuxInt
-               if !(is32Bit(-c - d)) {
-                       break
-               }
-               v.reset(OpAMD64ADDQconst)
-               v.AuxInt = -c - d
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpAMD64SARB)
+               v.Type = t
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
+               v0.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
+               v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
+               v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+               v3.AuxInt = 8
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v1.AddArg(v2)
+               v0.AddArg(v1)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
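
One guard in the SUBQconst rules above is worth spelling out: rewriting (SUBQconst [c] x) to (ADDQconst [-c] x) is skipped when c == -(1<<31), because negating the minimum 32-bit value wraps back to itself and -c would not be a representable immediate. A quick numeric check in plain Go (illustrative):

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		c := int32(math.MinInt32)
		// Negation wraps: -(-2147483648) is not representable in int32,
		// so the SUBQconst -> ADDQconst rewrite must be skipped.
		fmt.Println(c, -c) // -2147483648 -2147483648
	}
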
 func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool {
        b := v.Block
@@ -16875,203 +17072,6 @@ func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (XORL x (MOVLconst [c]))
-       // cond:
-       // result: (XORLconst [c] x)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpAMD64XORLconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORL (MOVLconst [c]) x)
-       // cond:
-       // result: (XORLconst [c] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpAMD64XORLconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORL x x)
-       // cond:
-       // result: (MOVLconst [0])
-       for {
-               x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = 0
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (XORLconst [c] (XORLconst [d] x))
-       // cond:
-       // result: (XORLconst [c ^ d] x)
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64XORLconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpAMD64XORLconst)
-               v.AuxInt = c ^ d
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORLconst [c] x)
-       // cond: int32(c)==0
-       // result: x
-       for {
-               c := v.AuxInt
-               x := v.Args[0]
-               if !(int32(c) == 0) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORLconst [c] (MOVLconst [d]))
-       // cond:
-       // result: (MOVLconst [c^d])
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVLconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpAMD64MOVLconst)
-               v.AuxInt = c ^ d
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (XORQ x (MOVQconst [c]))
-       // cond: is32Bit(c)
-       // result: (XORQconst [c] x)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(is32Bit(c)) {
-                       break
-               }
-               v.reset(OpAMD64XORQconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORQ (MOVQconst [c]) x)
-       // cond: is32Bit(c)
-       // result: (XORQconst [c] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               if !(is32Bit(c)) {
-                       break
-               }
-               v.reset(OpAMD64XORQconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORQ x x)
-       // cond:
-       // result: (MOVQconst [0])
-       for {
-               x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = 0
-               return true
-       }
-       return false
-}
-func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (XORQconst [c] (XORQconst [d] x))
-       // cond:
-       // result: (XORQconst [c ^ d] x)
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64XORQconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpAMD64XORQconst)
-               v.AuxInt = c ^ d
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORQconst [0] x)
-       // cond:
-       // result: x
-       for {
-               if v.AuxInt != 0 {
-                       break
-               }
-               x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (XORQconst [c] (MOVQconst [d]))
-       // cond:
-       // result: (MOVQconst [c^d])
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpAMD64MOVQconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpAMD64MOVQconst)
-               v.AuxInt = c ^ d
-               return true
-       }
-       return false
-}
 func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
@@ -17306,8 +17306,8 @@ func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
                v.reset(OpZero)
                v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8
                v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
-               v0.AddArg(destptr)
                v0.AuxInt = SizeAndAlign(s).Size() % 8
+               v0.AddArg(destptr)
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
                v1.AuxInt = 0
index eb000d7460fd0a56b2a1c856741b38a3361fa7ed..a4659e40bdd35e9a340b2856139ef4d5e7f28ce3 100644 (file)
@@ -70,32 +70,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMANDshiftRL(v, config)
        case OpARMANDshiftRLreg:
                return rewriteValueARM_OpARMANDshiftRLreg(v, config)
-       case OpAdd16:
-               return rewriteValueARM_OpAdd16(v, config)
-       case OpAdd32:
-               return rewriteValueARM_OpAdd32(v, config)
-       case OpAdd32F:
-               return rewriteValueARM_OpAdd32F(v, config)
-       case OpAdd32carry:
-               return rewriteValueARM_OpAdd32carry(v, config)
-       case OpAdd32withcarry:
-               return rewriteValueARM_OpAdd32withcarry(v, config)
-       case OpAdd64F:
-               return rewriteValueARM_OpAdd64F(v, config)
-       case OpAdd8:
-               return rewriteValueARM_OpAdd8(v, config)
-       case OpAddPtr:
-               return rewriteValueARM_OpAddPtr(v, config)
-       case OpAddr:
-               return rewriteValueARM_OpAddr(v, config)
-       case OpAnd16:
-               return rewriteValueARM_OpAnd16(v, config)
-       case OpAnd32:
-               return rewriteValueARM_OpAnd32(v, config)
-       case OpAnd8:
-               return rewriteValueARM_OpAnd8(v, config)
-       case OpAndB:
-               return rewriteValueARM_OpAndB(v, config)
        case OpARMBIC:
                return rewriteValueARM_OpARMBIC(v, config)
        case OpARMBICconst:
@@ -136,124 +110,12 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMCMPshiftRL(v, config)
        case OpARMCMPshiftRLreg:
                return rewriteValueARM_OpARMCMPshiftRLreg(v, config)
-       case OpClosureCall:
-               return rewriteValueARM_OpClosureCall(v, config)
-       case OpCom16:
-               return rewriteValueARM_OpCom16(v, config)
-       case OpCom32:
-               return rewriteValueARM_OpCom32(v, config)
-       case OpCom8:
-               return rewriteValueARM_OpCom8(v, config)
-       case OpConst16:
-               return rewriteValueARM_OpConst16(v, config)
-       case OpConst32:
-               return rewriteValueARM_OpConst32(v, config)
-       case OpConst32F:
-               return rewriteValueARM_OpConst32F(v, config)
-       case OpConst64F:
-               return rewriteValueARM_OpConst64F(v, config)
-       case OpConst8:
-               return rewriteValueARM_OpConst8(v, config)
-       case OpConstBool:
-               return rewriteValueARM_OpConstBool(v, config)
-       case OpConstNil:
-               return rewriteValueARM_OpConstNil(v, config)
-       case OpConvert:
-               return rewriteValueARM_OpConvert(v, config)
-       case OpCvt32Fto32:
-               return rewriteValueARM_OpCvt32Fto32(v, config)
-       case OpCvt32Fto32U:
-               return rewriteValueARM_OpCvt32Fto32U(v, config)
-       case OpCvt32Fto64F:
-               return rewriteValueARM_OpCvt32Fto64F(v, config)
-       case OpCvt32Uto32F:
-               return rewriteValueARM_OpCvt32Uto32F(v, config)
-       case OpCvt32Uto64F:
-               return rewriteValueARM_OpCvt32Uto64F(v, config)
-       case OpCvt32to32F:
-               return rewriteValueARM_OpCvt32to32F(v, config)
-       case OpCvt32to64F:
-               return rewriteValueARM_OpCvt32to64F(v, config)
-       case OpCvt64Fto32:
-               return rewriteValueARM_OpCvt64Fto32(v, config)
-       case OpCvt64Fto32F:
-               return rewriteValueARM_OpCvt64Fto32F(v, config)
-       case OpCvt64Fto32U:
-               return rewriteValueARM_OpCvt64Fto32U(v, config)
        case OpARMDIV:
                return rewriteValueARM_OpARMDIV(v, config)
        case OpARMDIVU:
                return rewriteValueARM_OpARMDIVU(v, config)
-       case OpDeferCall:
-               return rewriteValueARM_OpDeferCall(v, config)
-       case OpDiv16:
-               return rewriteValueARM_OpDiv16(v, config)
-       case OpDiv16u:
-               return rewriteValueARM_OpDiv16u(v, config)
-       case OpDiv32:
-               return rewriteValueARM_OpDiv32(v, config)
-       case OpDiv32F:
-               return rewriteValueARM_OpDiv32F(v, config)
-       case OpDiv32u:
-               return rewriteValueARM_OpDiv32u(v, config)
-       case OpDiv64F:
-               return rewriteValueARM_OpDiv64F(v, config)
-       case OpDiv8:
-               return rewriteValueARM_OpDiv8(v, config)
-       case OpDiv8u:
-               return rewriteValueARM_OpDiv8u(v, config)
-       case OpEq16:
-               return rewriteValueARM_OpEq16(v, config)
-       case OpEq32:
-               return rewriteValueARM_OpEq32(v, config)
-       case OpEq32F:
-               return rewriteValueARM_OpEq32F(v, config)
-       case OpEq64F:
-               return rewriteValueARM_OpEq64F(v, config)
-       case OpEq8:
-               return rewriteValueARM_OpEq8(v, config)
-       case OpEqB:
-               return rewriteValueARM_OpEqB(v, config)
-       case OpEqPtr:
-               return rewriteValueARM_OpEqPtr(v, config)
        case OpARMEqual:
                return rewriteValueARM_OpARMEqual(v, config)
-       case OpGeq16:
-               return rewriteValueARM_OpGeq16(v, config)
-       case OpGeq16U:
-               return rewriteValueARM_OpGeq16U(v, config)
-       case OpGeq32:
-               return rewriteValueARM_OpGeq32(v, config)
-       case OpGeq32F:
-               return rewriteValueARM_OpGeq32F(v, config)
-       case OpGeq32U:
-               return rewriteValueARM_OpGeq32U(v, config)
-       case OpGeq64F:
-               return rewriteValueARM_OpGeq64F(v, config)
-       case OpGeq8:
-               return rewriteValueARM_OpGeq8(v, config)
-       case OpGeq8U:
-               return rewriteValueARM_OpGeq8U(v, config)
-       case OpGetClosurePtr:
-               return rewriteValueARM_OpGetClosurePtr(v, config)
-       case OpGoCall:
-               return rewriteValueARM_OpGoCall(v, config)
-       case OpGreater16:
-               return rewriteValueARM_OpGreater16(v, config)
-       case OpGreater16U:
-               return rewriteValueARM_OpGreater16U(v, config)
-       case OpGreater32:
-               return rewriteValueARM_OpGreater32(v, config)
-       case OpGreater32F:
-               return rewriteValueARM_OpGreater32F(v, config)
-       case OpGreater32U:
-               return rewriteValueARM_OpGreater32U(v, config)
-       case OpGreater64F:
-               return rewriteValueARM_OpGreater64F(v, config)
-       case OpGreater8:
-               return rewriteValueARM_OpGreater8(v, config)
-       case OpGreater8U:
-               return rewriteValueARM_OpGreater8U(v, config)
        case OpARMGreaterEqual:
                return rewriteValueARM_OpARMGreaterEqual(v, config)
        case OpARMGreaterEqualU:
@@ -262,58 +124,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMGreaterThan(v, config)
        case OpARMGreaterThanU:
                return rewriteValueARM_OpARMGreaterThanU(v, config)
-       case OpHmul16:
-               return rewriteValueARM_OpHmul16(v, config)
-       case OpHmul16u:
-               return rewriteValueARM_OpHmul16u(v, config)
-       case OpHmul32:
-               return rewriteValueARM_OpHmul32(v, config)
-       case OpHmul32u:
-               return rewriteValueARM_OpHmul32u(v, config)
-       case OpHmul8:
-               return rewriteValueARM_OpHmul8(v, config)
-       case OpHmul8u:
-               return rewriteValueARM_OpHmul8u(v, config)
-       case OpInterCall:
-               return rewriteValueARM_OpInterCall(v, config)
-       case OpIsInBounds:
-               return rewriteValueARM_OpIsInBounds(v, config)
-       case OpIsNonNil:
-               return rewriteValueARM_OpIsNonNil(v, config)
-       case OpIsSliceInBounds:
-               return rewriteValueARM_OpIsSliceInBounds(v, config)
-       case OpLeq16:
-               return rewriteValueARM_OpLeq16(v, config)
-       case OpLeq16U:
-               return rewriteValueARM_OpLeq16U(v, config)
-       case OpLeq32:
-               return rewriteValueARM_OpLeq32(v, config)
-       case OpLeq32F:
-               return rewriteValueARM_OpLeq32F(v, config)
-       case OpLeq32U:
-               return rewriteValueARM_OpLeq32U(v, config)
-       case OpLeq64F:
-               return rewriteValueARM_OpLeq64F(v, config)
-       case OpLeq8:
-               return rewriteValueARM_OpLeq8(v, config)
-       case OpLeq8U:
-               return rewriteValueARM_OpLeq8U(v, config)
-       case OpLess16:
-               return rewriteValueARM_OpLess16(v, config)
-       case OpLess16U:
-               return rewriteValueARM_OpLess16U(v, config)
-       case OpLess32:
-               return rewriteValueARM_OpLess32(v, config)
-       case OpLess32F:
-               return rewriteValueARM_OpLess32F(v, config)
-       case OpLess32U:
-               return rewriteValueARM_OpLess32U(v, config)
-       case OpLess64F:
-               return rewriteValueARM_OpLess64F(v, config)
-       case OpLess8:
-               return rewriteValueARM_OpLess8(v, config)
-       case OpLess8U:
-               return rewriteValueARM_OpLess8U(v, config)
        case OpARMLessEqual:
                return rewriteValueARM_OpARMLessEqual(v, config)
        case OpARMLessEqualU:
@@ -322,38 +132,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMLessThan(v, config)
        case OpARMLessThanU:
                return rewriteValueARM_OpARMLessThanU(v, config)
-       case OpLoad:
-               return rewriteValueARM_OpLoad(v, config)
-       case OpLrot16:
-               return rewriteValueARM_OpLrot16(v, config)
-       case OpLrot32:
-               return rewriteValueARM_OpLrot32(v, config)
-       case OpLrot8:
-               return rewriteValueARM_OpLrot8(v, config)
-       case OpLsh16x16:
-               return rewriteValueARM_OpLsh16x16(v, config)
-       case OpLsh16x32:
-               return rewriteValueARM_OpLsh16x32(v, config)
-       case OpLsh16x64:
-               return rewriteValueARM_OpLsh16x64(v, config)
-       case OpLsh16x8:
-               return rewriteValueARM_OpLsh16x8(v, config)
-       case OpLsh32x16:
-               return rewriteValueARM_OpLsh32x16(v, config)
-       case OpLsh32x32:
-               return rewriteValueARM_OpLsh32x32(v, config)
-       case OpLsh32x64:
-               return rewriteValueARM_OpLsh32x64(v, config)
-       case OpLsh32x8:
-               return rewriteValueARM_OpLsh32x8(v, config)
-       case OpLsh8x16:
-               return rewriteValueARM_OpLsh8x16(v, config)
-       case OpLsh8x32:
-               return rewriteValueARM_OpLsh8x32(v, config)
-       case OpLsh8x64:
-               return rewriteValueARM_OpLsh8x64(v, config)
-       case OpLsh8x8:
-               return rewriteValueARM_OpLsh8x8(v, config)
        case OpARMMOVBUload:
                return rewriteValueARM_OpARMMOVBUload(v, config)
        case OpARMMOVBUreg:
@@ -422,60 +200,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMMVNshiftRL(v, config)
        case OpARMMVNshiftRLreg:
                return rewriteValueARM_OpARMMVNshiftRLreg(v, config)
-       case OpMod16:
-               return rewriteValueARM_OpMod16(v, config)
-       case OpMod16u:
-               return rewriteValueARM_OpMod16u(v, config)
-       case OpMod32:
-               return rewriteValueARM_OpMod32(v, config)
-       case OpMod32u:
-               return rewriteValueARM_OpMod32u(v, config)
-       case OpMod8:
-               return rewriteValueARM_OpMod8(v, config)
-       case OpMod8u:
-               return rewriteValueARM_OpMod8u(v, config)
-       case OpMove:
-               return rewriteValueARM_OpMove(v, config)
-       case OpMul16:
-               return rewriteValueARM_OpMul16(v, config)
-       case OpMul32:
-               return rewriteValueARM_OpMul32(v, config)
-       case OpMul32F:
-               return rewriteValueARM_OpMul32F(v, config)
-       case OpMul32uhilo:
-               return rewriteValueARM_OpMul32uhilo(v, config)
-       case OpMul64F:
-               return rewriteValueARM_OpMul64F(v, config)
-       case OpMul8:
-               return rewriteValueARM_OpMul8(v, config)
-       case OpNeg16:
-               return rewriteValueARM_OpNeg16(v, config)
-       case OpNeg32:
-               return rewriteValueARM_OpNeg32(v, config)
-       case OpNeg32F:
-               return rewriteValueARM_OpNeg32F(v, config)
-       case OpNeg64F:
-               return rewriteValueARM_OpNeg64F(v, config)
-       case OpNeg8:
-               return rewriteValueARM_OpNeg8(v, config)
-       case OpNeq16:
-               return rewriteValueARM_OpNeq16(v, config)
-       case OpNeq32:
-               return rewriteValueARM_OpNeq32(v, config)
-       case OpNeq32F:
-               return rewriteValueARM_OpNeq32F(v, config)
-       case OpNeq64F:
-               return rewriteValueARM_OpNeq64F(v, config)
-       case OpNeq8:
-               return rewriteValueARM_OpNeq8(v, config)
-       case OpNeqB:
-               return rewriteValueARM_OpNeqB(v, config)
-       case OpNeqPtr:
-               return rewriteValueARM_OpNeqPtr(v, config)
-       case OpNilCheck:
-               return rewriteValueARM_OpNilCheck(v, config)
-       case OpNot:
-               return rewriteValueARM_OpNot(v, config)
        case OpARMNotEqual:
                return rewriteValueARM_OpARMNotEqual(v, config)
        case OpARMOR:
@@ -494,16 +218,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMORshiftRL(v, config)
        case OpARMORshiftRLreg:
                return rewriteValueARM_OpARMORshiftRLreg(v, config)
-       case OpOffPtr:
-               return rewriteValueARM_OpOffPtr(v, config)
-       case OpOr16:
-               return rewriteValueARM_OpOr16(v, config)
-       case OpOr32:
-               return rewriteValueARM_OpOr32(v, config)
-       case OpOr8:
-               return rewriteValueARM_OpOr8(v, config)
-       case OpOrB:
-               return rewriteValueARM_OpOrB(v, config)
        case OpARMRSB:
                return rewriteValueARM_OpARMRSB(v, config)
        case OpARMRSBSshiftLL:
@@ -546,54 +260,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMRSCshiftRL(v, config)
        case OpARMRSCshiftRLreg:
                return rewriteValueARM_OpARMRSCshiftRLreg(v, config)
-       case OpRsh16Ux16:
-               return rewriteValueARM_OpRsh16Ux16(v, config)
-       case OpRsh16Ux32:
-               return rewriteValueARM_OpRsh16Ux32(v, config)
-       case OpRsh16Ux64:
-               return rewriteValueARM_OpRsh16Ux64(v, config)
-       case OpRsh16Ux8:
-               return rewriteValueARM_OpRsh16Ux8(v, config)
-       case OpRsh16x16:
-               return rewriteValueARM_OpRsh16x16(v, config)
-       case OpRsh16x32:
-               return rewriteValueARM_OpRsh16x32(v, config)
-       case OpRsh16x64:
-               return rewriteValueARM_OpRsh16x64(v, config)
-       case OpRsh16x8:
-               return rewriteValueARM_OpRsh16x8(v, config)
-       case OpRsh32Ux16:
-               return rewriteValueARM_OpRsh32Ux16(v, config)
-       case OpRsh32Ux32:
-               return rewriteValueARM_OpRsh32Ux32(v, config)
-       case OpRsh32Ux64:
-               return rewriteValueARM_OpRsh32Ux64(v, config)
-       case OpRsh32Ux8:
-               return rewriteValueARM_OpRsh32Ux8(v, config)
-       case OpRsh32x16:
-               return rewriteValueARM_OpRsh32x16(v, config)
-       case OpRsh32x32:
-               return rewriteValueARM_OpRsh32x32(v, config)
-       case OpRsh32x64:
-               return rewriteValueARM_OpRsh32x64(v, config)
-       case OpRsh32x8:
-               return rewriteValueARM_OpRsh32x8(v, config)
-       case OpRsh8Ux16:
-               return rewriteValueARM_OpRsh8Ux16(v, config)
-       case OpRsh8Ux32:
-               return rewriteValueARM_OpRsh8Ux32(v, config)
-       case OpRsh8Ux64:
-               return rewriteValueARM_OpRsh8Ux64(v, config)
-       case OpRsh8Ux8:
-               return rewriteValueARM_OpRsh8Ux8(v, config)
-       case OpRsh8x16:
-               return rewriteValueARM_OpRsh8x16(v, config)
-       case OpRsh8x32:
-               return rewriteValueARM_OpRsh8x32(v, config)
-       case OpRsh8x64:
-               return rewriteValueARM_OpRsh8x64(v, config)
-       case OpRsh8x8:
-               return rewriteValueARM_OpRsh8x8(v, config)
        case OpARMSBC:
                return rewriteValueARM_OpARMSBC(v, config)
        case OpARMSBCconst:
@@ -654,42 +320,6 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMSUBshiftRL(v, config)
        case OpARMSUBshiftRLreg:
                return rewriteValueARM_OpARMSUBshiftRLreg(v, config)
-       case OpSignExt16to32:
-               return rewriteValueARM_OpSignExt16to32(v, config)
-       case OpSignExt8to16:
-               return rewriteValueARM_OpSignExt8to16(v, config)
-       case OpSignExt8to32:
-               return rewriteValueARM_OpSignExt8to32(v, config)
-       case OpSignmask:
-               return rewriteValueARM_OpSignmask(v, config)
-       case OpSqrt:
-               return rewriteValueARM_OpSqrt(v, config)
-       case OpStaticCall:
-               return rewriteValueARM_OpStaticCall(v, config)
-       case OpStore:
-               return rewriteValueARM_OpStore(v, config)
-       case OpSub16:
-               return rewriteValueARM_OpSub16(v, config)
-       case OpSub32:
-               return rewriteValueARM_OpSub32(v, config)
-       case OpSub32F:
-               return rewriteValueARM_OpSub32F(v, config)
-       case OpSub32carry:
-               return rewriteValueARM_OpSub32carry(v, config)
-       case OpSub32withcarry:
-               return rewriteValueARM_OpSub32withcarry(v, config)
-       case OpSub64F:
-               return rewriteValueARM_OpSub64F(v, config)
-       case OpSub8:
-               return rewriteValueARM_OpSub8(v, config)
-       case OpSubPtr:
-               return rewriteValueARM_OpSubPtr(v, config)
-       case OpTrunc16to8:
-               return rewriteValueARM_OpTrunc16to8(v, config)
-       case OpTrunc32to16:
-               return rewriteValueARM_OpTrunc32to16(v, config)
-       case OpTrunc32to8:
-               return rewriteValueARM_OpTrunc32to8(v, config)
        case OpARMXOR:
                return rewriteValueARM_OpARMXOR(v, config)
        case OpARMXORconst:
@@ -706,154 +336,524 @@ func rewriteValueARM(v *Value, config *Config) bool {
                return rewriteValueARM_OpARMXORshiftRL(v, config)
        case OpARMXORshiftRLreg:
                return rewriteValueARM_OpARMXORshiftRLreg(v, config)
-       case OpXor16:
-               return rewriteValueARM_OpXor16(v, config)
-       case OpXor32:
-               return rewriteValueARM_OpXor32(v, config)
-       case OpXor8:
-               return rewriteValueARM_OpXor8(v, config)
-       case OpZero:
-               return rewriteValueARM_OpZero(v, config)
-       case OpZeroExt16to32:
-               return rewriteValueARM_OpZeroExt16to32(v, config)
-       case OpZeroExt8to16:
-               return rewriteValueARM_OpZeroExt8to16(v, config)
-       case OpZeroExt8to32:
-               return rewriteValueARM_OpZeroExt8to32(v, config)
-       case OpZeromask:
-               return rewriteValueARM_OpZeromask(v, config)
-       }
-       return false
-}
-func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ADC (MOVWconst [c]) x flags)
-       // cond:
-       // result: (ADCconst [c] x flags)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMADCconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               v.AddArg(flags)
-               return true
-       }
-       // match: (ADC x (MOVWconst [c]) flags)
-       // cond:
-       // result: (ADCconst [c] x flags)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMADCconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               v.AddArg(flags)
-               return true
-       }
-       // match: (ADC x (SLLconst [c] y) flags)
-       // cond:
-       // result: (ADCshiftLL x y [c] flags)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSLLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               y := v_1.Args[0]
-               flags := v.Args[2]
-               v.reset(OpARMADCshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
-               return true
-       }
-       // match: (ADC (SLLconst [c] y) x flags)
-       // cond:
-       // result: (ADCshiftLL x y [c] flags)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSLLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMADCshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
-               return true
-       }
-       // match: (ADC x (SRLconst [c] y) flags)
-       // cond:
-       // result: (ADCshiftRL x y [c] flags)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               y := v_1.Args[0]
-               flags := v.Args[2]
-               v.reset(OpARMADCshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
-               return true
-       }
-       // match: (ADC (SRLconst [c] y) x flags)
-       // cond:
-       // result: (ADCshiftRL x y [c] flags)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMADCshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
-               return true
-       }
-       // match: (ADC x (SRAconst [c] y) flags)
-       // cond:
-       // result: (ADCshiftRA x y [c] flags)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRAconst {
-                       break
-               }
-               c := v_1.AuxInt
+       case OpAdd16:
+               return rewriteValueARM_OpAdd16(v, config)
+       case OpAdd32:
+               return rewriteValueARM_OpAdd32(v, config)
+       case OpAdd32F:
+               return rewriteValueARM_OpAdd32F(v, config)
+       case OpAdd32carry:
+               return rewriteValueARM_OpAdd32carry(v, config)
+       case OpAdd32withcarry:
+               return rewriteValueARM_OpAdd32withcarry(v, config)
+       case OpAdd64F:
+               return rewriteValueARM_OpAdd64F(v, config)
+       case OpAdd8:
+               return rewriteValueARM_OpAdd8(v, config)
+       case OpAddPtr:
+               return rewriteValueARM_OpAddPtr(v, config)
+       case OpAddr:
+               return rewriteValueARM_OpAddr(v, config)
+       case OpAnd16:
+               return rewriteValueARM_OpAnd16(v, config)
+       case OpAnd32:
+               return rewriteValueARM_OpAnd32(v, config)
+       case OpAnd8:
+               return rewriteValueARM_OpAnd8(v, config)
+       case OpAndB:
+               return rewriteValueARM_OpAndB(v, config)
+       case OpClosureCall:
+               return rewriteValueARM_OpClosureCall(v, config)
+       case OpCom16:
+               return rewriteValueARM_OpCom16(v, config)
+       case OpCom32:
+               return rewriteValueARM_OpCom32(v, config)
+       case OpCom8:
+               return rewriteValueARM_OpCom8(v, config)
+       case OpConst16:
+               return rewriteValueARM_OpConst16(v, config)
+       case OpConst32:
+               return rewriteValueARM_OpConst32(v, config)
+       case OpConst32F:
+               return rewriteValueARM_OpConst32F(v, config)
+       case OpConst64F:
+               return rewriteValueARM_OpConst64F(v, config)
+       case OpConst8:
+               return rewriteValueARM_OpConst8(v, config)
+       case OpConstBool:
+               return rewriteValueARM_OpConstBool(v, config)
+       case OpConstNil:
+               return rewriteValueARM_OpConstNil(v, config)
+       case OpConvert:
+               return rewriteValueARM_OpConvert(v, config)
+       case OpCvt32Fto32:
+               return rewriteValueARM_OpCvt32Fto32(v, config)
+       case OpCvt32Fto32U:
+               return rewriteValueARM_OpCvt32Fto32U(v, config)
+       case OpCvt32Fto64F:
+               return rewriteValueARM_OpCvt32Fto64F(v, config)
+       case OpCvt32Uto32F:
+               return rewriteValueARM_OpCvt32Uto32F(v, config)
+       case OpCvt32Uto64F:
+               return rewriteValueARM_OpCvt32Uto64F(v, config)
+       case OpCvt32to32F:
+               return rewriteValueARM_OpCvt32to32F(v, config)
+       case OpCvt32to64F:
+               return rewriteValueARM_OpCvt32to64F(v, config)
+       case OpCvt64Fto32:
+               return rewriteValueARM_OpCvt64Fto32(v, config)
+       case OpCvt64Fto32F:
+               return rewriteValueARM_OpCvt64Fto32F(v, config)
+       case OpCvt64Fto32U:
+               return rewriteValueARM_OpCvt64Fto32U(v, config)
+       case OpDeferCall:
+               return rewriteValueARM_OpDeferCall(v, config)
+       case OpDiv16:
+               return rewriteValueARM_OpDiv16(v, config)
+       case OpDiv16u:
+               return rewriteValueARM_OpDiv16u(v, config)
+       case OpDiv32:
+               return rewriteValueARM_OpDiv32(v, config)
+       case OpDiv32F:
+               return rewriteValueARM_OpDiv32F(v, config)
+       case OpDiv32u:
+               return rewriteValueARM_OpDiv32u(v, config)
+       case OpDiv64F:
+               return rewriteValueARM_OpDiv64F(v, config)
+       case OpDiv8:
+               return rewriteValueARM_OpDiv8(v, config)
+       case OpDiv8u:
+               return rewriteValueARM_OpDiv8u(v, config)
+       case OpEq16:
+               return rewriteValueARM_OpEq16(v, config)
+       case OpEq32:
+               return rewriteValueARM_OpEq32(v, config)
+       case OpEq32F:
+               return rewriteValueARM_OpEq32F(v, config)
+       case OpEq64F:
+               return rewriteValueARM_OpEq64F(v, config)
+       case OpEq8:
+               return rewriteValueARM_OpEq8(v, config)
+       case OpEqB:
+               return rewriteValueARM_OpEqB(v, config)
+       case OpEqPtr:
+               return rewriteValueARM_OpEqPtr(v, config)
+       case OpGeq16:
+               return rewriteValueARM_OpGeq16(v, config)
+       case OpGeq16U:
+               return rewriteValueARM_OpGeq16U(v, config)
+       case OpGeq32:
+               return rewriteValueARM_OpGeq32(v, config)
+       case OpGeq32F:
+               return rewriteValueARM_OpGeq32F(v, config)
+       case OpGeq32U:
+               return rewriteValueARM_OpGeq32U(v, config)
+       case OpGeq64F:
+               return rewriteValueARM_OpGeq64F(v, config)
+       case OpGeq8:
+               return rewriteValueARM_OpGeq8(v, config)
+       case OpGeq8U:
+               return rewriteValueARM_OpGeq8U(v, config)
+       case OpGetClosurePtr:
+               return rewriteValueARM_OpGetClosurePtr(v, config)
+       case OpGoCall:
+               return rewriteValueARM_OpGoCall(v, config)
+       case OpGreater16:
+               return rewriteValueARM_OpGreater16(v, config)
+       case OpGreater16U:
+               return rewriteValueARM_OpGreater16U(v, config)
+       case OpGreater32:
+               return rewriteValueARM_OpGreater32(v, config)
+       case OpGreater32F:
+               return rewriteValueARM_OpGreater32F(v, config)
+       case OpGreater32U:
+               return rewriteValueARM_OpGreater32U(v, config)
+       case OpGreater64F:
+               return rewriteValueARM_OpGreater64F(v, config)
+       case OpGreater8:
+               return rewriteValueARM_OpGreater8(v, config)
+       case OpGreater8U:
+               return rewriteValueARM_OpGreater8U(v, config)
+       case OpHmul16:
+               return rewriteValueARM_OpHmul16(v, config)
+       case OpHmul16u:
+               return rewriteValueARM_OpHmul16u(v, config)
+       case OpHmul32:
+               return rewriteValueARM_OpHmul32(v, config)
+       case OpHmul32u:
+               return rewriteValueARM_OpHmul32u(v, config)
+       case OpHmul8:
+               return rewriteValueARM_OpHmul8(v, config)
+       case OpHmul8u:
+               return rewriteValueARM_OpHmul8u(v, config)
+       case OpInterCall:
+               return rewriteValueARM_OpInterCall(v, config)
+       case OpIsInBounds:
+               return rewriteValueARM_OpIsInBounds(v, config)
+       case OpIsNonNil:
+               return rewriteValueARM_OpIsNonNil(v, config)
+       case OpIsSliceInBounds:
+               return rewriteValueARM_OpIsSliceInBounds(v, config)
+       case OpLeq16:
+               return rewriteValueARM_OpLeq16(v, config)
+       case OpLeq16U:
+               return rewriteValueARM_OpLeq16U(v, config)
+       case OpLeq32:
+               return rewriteValueARM_OpLeq32(v, config)
+       case OpLeq32F:
+               return rewriteValueARM_OpLeq32F(v, config)
+       case OpLeq32U:
+               return rewriteValueARM_OpLeq32U(v, config)
+       case OpLeq64F:
+               return rewriteValueARM_OpLeq64F(v, config)
+       case OpLeq8:
+               return rewriteValueARM_OpLeq8(v, config)
+       case OpLeq8U:
+               return rewriteValueARM_OpLeq8U(v, config)
+       case OpLess16:
+               return rewriteValueARM_OpLess16(v, config)
+       case OpLess16U:
+               return rewriteValueARM_OpLess16U(v, config)
+       case OpLess32:
+               return rewriteValueARM_OpLess32(v, config)
+       case OpLess32F:
+               return rewriteValueARM_OpLess32F(v, config)
+       case OpLess32U:
+               return rewriteValueARM_OpLess32U(v, config)
+       case OpLess64F:
+               return rewriteValueARM_OpLess64F(v, config)
+       case OpLess8:
+               return rewriteValueARM_OpLess8(v, config)
+       case OpLess8U:
+               return rewriteValueARM_OpLess8U(v, config)
+       case OpLoad:
+               return rewriteValueARM_OpLoad(v, config)
+       case OpLrot16:
+               return rewriteValueARM_OpLrot16(v, config)
+       case OpLrot32:
+               return rewriteValueARM_OpLrot32(v, config)
+       case OpLrot8:
+               return rewriteValueARM_OpLrot8(v, config)
+       case OpLsh16x16:
+               return rewriteValueARM_OpLsh16x16(v, config)
+       case OpLsh16x32:
+               return rewriteValueARM_OpLsh16x32(v, config)
+       case OpLsh16x64:
+               return rewriteValueARM_OpLsh16x64(v, config)
+       case OpLsh16x8:
+               return rewriteValueARM_OpLsh16x8(v, config)
+       case OpLsh32x16:
+               return rewriteValueARM_OpLsh32x16(v, config)
+       case OpLsh32x32:
+               return rewriteValueARM_OpLsh32x32(v, config)
+       case OpLsh32x64:
+               return rewriteValueARM_OpLsh32x64(v, config)
+       case OpLsh32x8:
+               return rewriteValueARM_OpLsh32x8(v, config)
+       case OpLsh8x16:
+               return rewriteValueARM_OpLsh8x16(v, config)
+       case OpLsh8x32:
+               return rewriteValueARM_OpLsh8x32(v, config)
+       case OpLsh8x64:
+               return rewriteValueARM_OpLsh8x64(v, config)
+       case OpLsh8x8:
+               return rewriteValueARM_OpLsh8x8(v, config)
+       case OpMod16:
+               return rewriteValueARM_OpMod16(v, config)
+       case OpMod16u:
+               return rewriteValueARM_OpMod16u(v, config)
+       case OpMod32:
+               return rewriteValueARM_OpMod32(v, config)
+       case OpMod32u:
+               return rewriteValueARM_OpMod32u(v, config)
+       case OpMod8:
+               return rewriteValueARM_OpMod8(v, config)
+       case OpMod8u:
+               return rewriteValueARM_OpMod8u(v, config)
+       case OpMove:
+               return rewriteValueARM_OpMove(v, config)
+       case OpMul16:
+               return rewriteValueARM_OpMul16(v, config)
+       case OpMul32:
+               return rewriteValueARM_OpMul32(v, config)
+       case OpMul32F:
+               return rewriteValueARM_OpMul32F(v, config)
+       case OpMul32uhilo:
+               return rewriteValueARM_OpMul32uhilo(v, config)
+       case OpMul64F:
+               return rewriteValueARM_OpMul64F(v, config)
+       case OpMul8:
+               return rewriteValueARM_OpMul8(v, config)
+       case OpNeg16:
+               return rewriteValueARM_OpNeg16(v, config)
+       case OpNeg32:
+               return rewriteValueARM_OpNeg32(v, config)
+       case OpNeg32F:
+               return rewriteValueARM_OpNeg32F(v, config)
+       case OpNeg64F:
+               return rewriteValueARM_OpNeg64F(v, config)
+       case OpNeg8:
+               return rewriteValueARM_OpNeg8(v, config)
+       case OpNeq16:
+               return rewriteValueARM_OpNeq16(v, config)
+       case OpNeq32:
+               return rewriteValueARM_OpNeq32(v, config)
+       case OpNeq32F:
+               return rewriteValueARM_OpNeq32F(v, config)
+       case OpNeq64F:
+               return rewriteValueARM_OpNeq64F(v, config)
+       case OpNeq8:
+               return rewriteValueARM_OpNeq8(v, config)
+       case OpNeqB:
+               return rewriteValueARM_OpNeqB(v, config)
+       case OpNeqPtr:
+               return rewriteValueARM_OpNeqPtr(v, config)
+       case OpNilCheck:
+               return rewriteValueARM_OpNilCheck(v, config)
+       case OpNot:
+               return rewriteValueARM_OpNot(v, config)
+       case OpOffPtr:
+               return rewriteValueARM_OpOffPtr(v, config)
+       case OpOr16:
+               return rewriteValueARM_OpOr16(v, config)
+       case OpOr32:
+               return rewriteValueARM_OpOr32(v, config)
+       case OpOr8:
+               return rewriteValueARM_OpOr8(v, config)
+       case OpOrB:
+               return rewriteValueARM_OpOrB(v, config)
+       case OpRsh16Ux16:
+               return rewriteValueARM_OpRsh16Ux16(v, config)
+       case OpRsh16Ux32:
+               return rewriteValueARM_OpRsh16Ux32(v, config)
+       case OpRsh16Ux64:
+               return rewriteValueARM_OpRsh16Ux64(v, config)
+       case OpRsh16Ux8:
+               return rewriteValueARM_OpRsh16Ux8(v, config)
+       case OpRsh16x16:
+               return rewriteValueARM_OpRsh16x16(v, config)
+       case OpRsh16x32:
+               return rewriteValueARM_OpRsh16x32(v, config)
+       case OpRsh16x64:
+               return rewriteValueARM_OpRsh16x64(v, config)
+       case OpRsh16x8:
+               return rewriteValueARM_OpRsh16x8(v, config)
+       case OpRsh32Ux16:
+               return rewriteValueARM_OpRsh32Ux16(v, config)
+       case OpRsh32Ux32:
+               return rewriteValueARM_OpRsh32Ux32(v, config)
+       case OpRsh32Ux64:
+               return rewriteValueARM_OpRsh32Ux64(v, config)
+       case OpRsh32Ux8:
+               return rewriteValueARM_OpRsh32Ux8(v, config)
+       case OpRsh32x16:
+               return rewriteValueARM_OpRsh32x16(v, config)
+       case OpRsh32x32:
+               return rewriteValueARM_OpRsh32x32(v, config)
+       case OpRsh32x64:
+               return rewriteValueARM_OpRsh32x64(v, config)
+       case OpRsh32x8:
+               return rewriteValueARM_OpRsh32x8(v, config)
+       case OpRsh8Ux16:
+               return rewriteValueARM_OpRsh8Ux16(v, config)
+       case OpRsh8Ux32:
+               return rewriteValueARM_OpRsh8Ux32(v, config)
+       case OpRsh8Ux64:
+               return rewriteValueARM_OpRsh8Ux64(v, config)
+       case OpRsh8Ux8:
+               return rewriteValueARM_OpRsh8Ux8(v, config)
+       case OpRsh8x16:
+               return rewriteValueARM_OpRsh8x16(v, config)
+       case OpRsh8x32:
+               return rewriteValueARM_OpRsh8x32(v, config)
+       case OpRsh8x64:
+               return rewriteValueARM_OpRsh8x64(v, config)
+       case OpRsh8x8:
+               return rewriteValueARM_OpRsh8x8(v, config)
+       case OpSignExt16to32:
+               return rewriteValueARM_OpSignExt16to32(v, config)
+       case OpSignExt8to16:
+               return rewriteValueARM_OpSignExt8to16(v, config)
+       case OpSignExt8to32:
+               return rewriteValueARM_OpSignExt8to32(v, config)
+       case OpSignmask:
+               return rewriteValueARM_OpSignmask(v, config)
+       case OpSqrt:
+               return rewriteValueARM_OpSqrt(v, config)
+       case OpStaticCall:
+               return rewriteValueARM_OpStaticCall(v, config)
+       case OpStore:
+               return rewriteValueARM_OpStore(v, config)
+       case OpSub16:
+               return rewriteValueARM_OpSub16(v, config)
+       case OpSub32:
+               return rewriteValueARM_OpSub32(v, config)
+       case OpSub32F:
+               return rewriteValueARM_OpSub32F(v, config)
+       case OpSub32carry:
+               return rewriteValueARM_OpSub32carry(v, config)
+       case OpSub32withcarry:
+               return rewriteValueARM_OpSub32withcarry(v, config)
+       case OpSub64F:
+               return rewriteValueARM_OpSub64F(v, config)
+       case OpSub8:
+               return rewriteValueARM_OpSub8(v, config)
+       case OpSubPtr:
+               return rewriteValueARM_OpSubPtr(v, config)
+       case OpTrunc16to8:
+               return rewriteValueARM_OpTrunc16to8(v, config)
+       case OpTrunc32to16:
+               return rewriteValueARM_OpTrunc32to16(v, config)
+       case OpTrunc32to8:
+               return rewriteValueARM_OpTrunc32to8(v, config)
+       case OpXor16:
+               return rewriteValueARM_OpXor16(v, config)
+       case OpXor32:
+               return rewriteValueARM_OpXor32(v, config)
+       case OpXor8:
+               return rewriteValueARM_OpXor8(v, config)
+       case OpZero:
+               return rewriteValueARM_OpZero(v, config)
+       case OpZeroExt16to32:
+               return rewriteValueARM_OpZeroExt16to32(v, config)
+       case OpZeroExt8to16:
+               return rewriteValueARM_OpZeroExt8to16(v, config)
+       case OpZeroExt8to32:
+               return rewriteValueARM_OpZeroExt8to32(v, config)
+       case OpZeromask:
+               return rewriteValueARM_OpZeromask(v, config)
+       }
+       return false
+}
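
Every case deleted from this switch higher up is re-added verbatim below the
OpARM* cases: with the architecture folded into the name, a plain byte-wise
sort puts all OpARM*-prefixed ops ahead of the generic ones, since 'R' sorts
before the lowercase 'd' of "OpAdd16". A small runnable illustration:

	package main

	import (
		"fmt"
		"sort"
	)

	func main() {
		cases := []string{"OpAdd16", "OpARMADC", "OpZeromask", "OpARMXORshiftRLreg"}
		sort.Strings(cases)
		fmt.Println(cases) // [OpARMADC OpARMXORshiftRLreg OpAdd16 OpZeromask]
	}
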
+func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ADC (MOVWconst [c]) x flags)
+       // cond:
+       // result: (ADCconst [c] x flags)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMADCconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (ADC x (MOVWconst [c]) flags)
+       // cond:
+       // result: (ADCconst [c] x flags)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMADCconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (ADC x (SLLconst [c] y) flags)
+       // cond:
+       // result: (ADCshiftLL x y [c] flags)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_1.AuxInt
                y := v_1.Args[0]
                flags := v.Args[2]
-               v.reset(OpARMADCshiftRA)
+               v.reset(OpARMADCshiftLL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (ADC (SLLconst [c] y) x flags)
+       // cond:
+       // result: (ADCshiftLL x y [c] flags)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftLL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (ADC x (SRLconst [c] y) flags)
+       // cond:
+       // result: (ADCshiftRL x y [c] flags)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftRL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (ADC (SRLconst [c] y) x flags)
+       // cond:
+       // result: (ADCshiftRL x y [c] flags)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (ADC x (SRAconst [c] y) flags)
+       // cond:
+       // result: (ADCshiftRA x y [c] flags)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               flags := v.Args[2]
+               v.reset(OpARMADCshiftRA)
                v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
                v.AddArg(flags)
                return true
        }
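
The re-added rewriteValueARM_OpARMADC is rule-for-rule the same as the
deleted one; only the statement order inside each match body differs, with
v.AuxInt = c now emitted right after v.reset(...) and before the AddArg
calls. A hypothetical sketch of emitting a result in one canonical order
(reset, AuxInt, args) no matter where [c] sits in the rule text; rulegen's
real emitter differs in detail:

	package main

	import "fmt"

	type result struct {
		op     string
		auxint string   // e.g. "c"; empty when the rule has no [aux]
		args   []string // e.g. ["x", "y", "flags"]
	}

	func emit(r result) {
		fmt.Printf("v.reset(Op%s)\n", r.op)
		if r.auxint != "" {
			fmt.Printf("v.AuxInt = %s\n", r.auxint) // always ahead of the args
		}
		for _, a := range r.args {
			fmt.Printf("v.AddArg(%s)\n", a)
		}
	}

	func main() {
		// (ADCshiftLL x y [c] flags): same output as for (ADCshiftLL [c] x y flags)
		emit(result{op: "ARMADCshiftLL", auxint: "c", args: []string{"x", "y", "flags"}})
	}
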
@@ -870,9 +870,9 @@ func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
                x := v.Args[1]
                flags := v.Args[2]
                v.reset(OpARMADCshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                v.AddArg(flags)
                return true
        }
@@ -1040,19 +1040,19 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value, config *Config) bool {
        // cond:
        // result: (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                flags := v.Args[2]
                v.reset(OpARMADCconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                v.AddArg(flags)
                return true
@@ -1061,17 +1061,17 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value, config *Config) bool {
        // cond:
        // result: (ADCconst x [int64(uint32(c)<<uint64(d))] flags)
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                flags := v.Args[2]
                v.reset(OpARMADCconst)
-               v.AddArg(x)
                v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(x)
                v.AddArg(flags)
                return true
        }
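
The hunks that follow repeat the same two mechanical moves throughout the
file: d := v.AuxInt is hoisted to the top of the match body, and AuxInt
assignments move ahead of the AddArg calls. The folded constants themselves
are untouched; they encode 32-bit ARM arithmetic, truncating c before the
shift and widening the result back into the 64-bit AuxInt. A minimal sketch
of that arithmetic:

	package main

	import "fmt"

	func main() {
		c, d := int64(0x80000001), uint64(1)
		fmt.Println(int64(uint32(c) << d)) // 2: the high bit is shifted out
		fmt.Println(int64(int32(c) >> d))  // -1073741824: arithmetic shift keeps the sign
	}
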
@@ -1114,9 +1114,9 @@ func rewriteValueARM_OpARMADCshiftLLreg(v *Value, config *Config) bool {
                c := v_2.AuxInt
                flags := v.Args[3]
                v.reset(OpARMADCshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                v.AddArg(flags)
                return true
        }
@@ -1129,19 +1129,19 @@ func rewriteValueARM_OpARMADCshiftRA(v *Value, config *Config) bool {
        // cond:
        // result: (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                flags := v.Args[2]
                v.reset(OpARMADCconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                v.AddArg(flags)
                return true
@@ -1150,17 +1150,17 @@ func rewriteValueARM_OpARMADCshiftRA(v *Value, config *Config) bool {
        // cond:
        // result: (ADCconst x [int64(int32(c)>>uint64(d))] flags)
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                flags := v.Args[2]
                v.reset(OpARMADCconst)
-               v.AddArg(x)
                v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(x)
                v.AddArg(flags)
                return true
        }
@@ -1203,9 +1203,9 @@ func rewriteValueARM_OpARMADCshiftRAreg(v *Value, config *Config) bool {
                c := v_2.AuxInt
                flags := v.Args[3]
                v.reset(OpARMADCshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                v.AddArg(flags)
                return true
        }
@@ -1218,19 +1218,19 @@ func rewriteValueARM_OpARMADCshiftRL(v *Value, config *Config) bool {
        // cond:
        // result: (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                flags := v.Args[2]
                v.reset(OpARMADCconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                v.AddArg(flags)
                return true
@@ -1239,17 +1239,17 @@ func rewriteValueARM_OpARMADCshiftRL(v *Value, config *Config) bool {
        // cond:
        // result: (ADCconst x [int64(uint32(c)>>uint64(d))] flags)
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                flags := v.Args[2]
                v.reset(OpARMADCconst)
-               v.AddArg(x)
                v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(x)
                v.AddArg(flags)
                return true
        }
@@ -1292,9 +1292,9 @@ func rewriteValueARM_OpARMADCshiftRLreg(v *Value, config *Config) bool {
                c := v_2.AuxInt
                flags := v.Args[3]
                v.reset(OpARMADCshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                v.AddArg(flags)
                return true
        }
@@ -1345,9 +1345,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMADDshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (ADD (SLLconst [c] y) x)
@@ -1362,9 +1362,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
                y := v_0.Args[0]
                x := v.Args[1]
                v.reset(OpARMADDshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (ADD x (SRLconst [c] y))
@@ -1379,9 +1379,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMADDshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (ADD (SRLconst [c] y) x)
@@ -1396,9 +1396,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
                y := v_0.Args[0]
                x := v.Args[1]
                v.reset(OpARMADDshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (ADD x (SRAconst [c] y))
@@ -1413,9 +1413,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMADDshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (ADD (SRAconst [c] y) x)
@@ -1430,9 +1430,9 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
                y := v_0.Args[0]
                x := v.Args[1]
                v.reset(OpARMADDshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (ADD x (SLL y z))
@@ -1654,9 +1654,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMADDSshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (ADDS (SLLconst [c] y) x)
@@ -1671,9 +1671,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
                y := v_0.Args[0]
                x := v.Args[1]
                v.reset(OpARMADDSshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (ADDS x (SRLconst [c] y))
@@ -1688,9 +1688,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMADDSshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (ADDS (SRLconst [c] y) x)
@@ -1705,9 +1705,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
                y := v_0.Args[0]
                x := v.Args[1]
                v.reset(OpARMADDSshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (ADDS x (SRAconst [c] y))
@@ -1722,9 +1722,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMADDSshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (ADDS (SRAconst [c] y) x)
@@ -1739,9 +1739,9 @@ func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
                y := v_0.Args[0]
                x := v.Args[1]
                v.reset(OpARMADDSshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (ADDS x (SLL y z))
@@ -1855,18 +1855,18 @@ func rewriteValueARM_OpARMADDSshiftLL(v *Value, config *Config) bool {
        // cond:
        // result: (ADDSconst [c] (SLLconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                v.reset(OpARMADDSconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
@@ -1874,16 +1874,16 @@ func rewriteValueARM_OpARMADDSshiftLL(v *Value, config *Config) bool {
        // cond:
        // result: (ADDSconst x [int64(uint32(c)<<uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMADDSconst)
-               v.AddArg(x)
                v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(x)
                return true
        }
        return false
@@ -1922,9 +1922,9 @@ func rewriteValueARM_OpARMADDSshiftLLreg(v *Value, config *Config) bool {
                }
                c := v_2.AuxInt
                v.reset(OpARMADDSshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
@@ -1936,18 +1936,18 @@ func rewriteValueARM_OpARMADDSshiftRA(v *Value, config *Config) bool {
        // cond:
        // result: (ADDSconst [c] (SRAconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                v.reset(OpARMADDSconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
@@ -1955,16 +1955,16 @@ func rewriteValueARM_OpARMADDSshiftRA(v *Value, config *Config) bool {
        // cond:
        // result: (ADDSconst x [int64(int32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMADDSconst)
-               v.AddArg(x)
                v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
        return false
@@ -2003,9 +2003,9 @@ func rewriteValueARM_OpARMADDSshiftRAreg(v *Value, config *Config) bool {
                }
                c := v_2.AuxInt
                v.reset(OpARMADDSshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
@@ -2017,18 +2017,18 @@ func rewriteValueARM_OpARMADDSshiftRL(v *Value, config *Config) bool {
        // cond:
        // result: (ADDSconst [c] (SRLconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                v.reset(OpARMADDSconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
@@ -2036,16 +2036,16 @@ func rewriteValueARM_OpARMADDSshiftRL(v *Value, config *Config) bool {
        // cond:
        // result: (ADDSconst x [int64(uint32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMADDSconst)
-               v.AddArg(x)
                v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
        return false
@@ -2084,9 +2084,9 @@ func rewriteValueARM_OpARMADDSshiftRLreg(v *Value, config *Config) bool {
                }
                c := v_2.AuxInt
                v.reset(OpARMADDSshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
@@ -2196,18 +2196,18 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value, config *Config) bool {
        // cond:
        // result: (ADDconst [c] (SLLconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                v.reset(OpARMADDconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
@@ -2215,16 +2215,16 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value, config *Config) bool {
        // cond:
        // result: (ADDconst x [int64(uint32(c)<<uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMADDconst)
-               v.AddArg(x)
                v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(x)
                return true
        }
        return false
@@ -2263,9 +2263,9 @@ func rewriteValueARM_OpARMADDshiftLLreg(v *Value, config *Config) bool {
                }
                c := v_2.AuxInt
                v.reset(OpARMADDshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
@@ -2277,18 +2277,18 @@ func rewriteValueARM_OpARMADDshiftRA(v *Value, config *Config) bool {
        // cond:
        // result: (ADDconst [c] (SRAconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                v.reset(OpARMADDconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
@@ -2296,16 +2296,16 @@ func rewriteValueARM_OpARMADDshiftRA(v *Value, config *Config) bool {
        // cond:
        // result: (ADDconst x [int64(int32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMADDconst)
-               v.AddArg(x)
                v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
        return false
@@ -2344,9 +2344,9 @@ func rewriteValueARM_OpARMADDshiftRAreg(v *Value, config *Config) bool {
                }
                c := v_2.AuxInt
                v.reset(OpARMADDshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
@@ -2358,18 +2358,18 @@ func rewriteValueARM_OpARMADDshiftRL(v *Value, config *Config) bool {
        // cond:
        // result: (ADDconst [c] (SRLconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                v.reset(OpARMADDconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
@@ -2377,16 +2377,16 @@ func rewriteValueARM_OpARMADDshiftRL(v *Value, config *Config) bool {
        // cond:
        // result: (ADDconst x [int64(uint32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMADDconst)
-               v.AddArg(x)
                v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
        return false
@@ -2425,9 +2425,9 @@ func rewriteValueARM_OpARMADDshiftRLreg(v *Value, config *Config) bool {
                }
                c := v_2.AuxInt
                v.reset(OpARMADDshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
@@ -2477,9 +2477,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMANDshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (AND (SLLconst [c] y) x)
@@ -2494,9 +2494,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
                y := v_0.Args[0]
                x := v.Args[1]
                v.reset(OpARMANDshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (AND x (SRLconst [c] y))
@@ -2511,9 +2511,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMANDshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (AND (SRLconst [c] y) x)
@@ -2528,9 +2528,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
                y := v_0.Args[0]
                x := v.Args[1]
                v.reset(OpARMANDshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (AND x (SRAconst [c] y))
@@ -2545,9 +2545,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMANDshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (AND (SRAconst [c] y) x)
@@ -2562,9 +2562,9 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
                y := v_0.Args[0]
                x := v.Args[1]
                v.reset(OpARMANDshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (AND x (SLL y z))
@@ -2674,400 +2674,194 @@ func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
        // result: x
        for {
                x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (AND x (MVN y))
-       // cond:
-       // result: (BIC x y)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMVN {
-                       break
-               }
-               y := v_1.Args[0]
-               v.reset(OpARMBIC)
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-       // match: (AND x (MVNshiftLL y [c]))
-       // cond:
-       // result: (BICshiftLL x y [c])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMVNshiftLL {
-                       break
-               }
-               y := v_1.Args[0]
-               c := v_1.AuxInt
-               v.reset(OpARMBICshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               return true
-       }
-       // match: (AND x (MVNshiftRL y [c]))
-       // cond:
-       // result: (BICshiftRL x y [c])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMVNshiftRL {
-                       break
-               }
-               y := v_1.Args[0]
-               c := v_1.AuxInt
-               v.reset(OpARMBICshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               return true
-       }
-       // match: (AND x (MVNshiftRA y [c]))
-       // cond:
-       // result: (BICshiftRA x y [c])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMVNshiftRA {
-                       break
-               }
-               y := v_1.Args[0]
-               c := v_1.AuxInt
-               v.reset(OpARMBICshiftRA)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               return true
-       }
-       return false
-}
-func rewriteValueARM_OpARMANDconst(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ANDconst [0] _)
-       // cond:
-       // result: (MOVWconst [0])
-       for {
-               if v.AuxInt != 0 {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
-               return true
-       }
-       // match: (ANDconst [c] x)
-       // cond: int32(c)==-1
-       // result: x
-       for {
-               c := v.AuxInt
-               x := v.Args[0]
-               if !(int32(c) == -1) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
-               return true
-       }
-       // match: (ANDconst [c] (MOVWconst [d]))
-       // cond:
-       // result: (MOVWconst [c&d])
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = c & d
-               return true
-       }
-       // match: (ANDconst [c] (ANDconst [d] x))
-       // cond:
-       // result: (ANDconst [c&d] x)
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMANDconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMANDconst)
-               v.AuxInt = c & d
-               v.AddArg(x)
-               return true
-       }
-       return false
-}
-func rewriteValueARM_OpARMANDshiftLL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ANDshiftLL (MOVWconst [c]) x [d])
-       // cond:
-       // result: (ANDconst [c] (SLLconst <x.Type> x [d]))
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMANDconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
-               return true
-       }
-       // match: (ANDshiftLL x (MOVWconst [c]) [d])
-       // cond:
-       // result: (ANDconst x [int64(uint32(c)<<uint64(d))])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMANDconst)
-               v.AddArg(x)
-               v.AuxInt = int64(uint32(c) << uint64(d))
-               return true
-       }
-       // match: (ANDshiftLL x y:(SLLconst x [c]) [d])
-       // cond: c==d
-       // result: y
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               if y.Op != OpARMSLLconst {
-                       break
-               }
-               if x != y.Args[0] {
-                       break
-               }
-               c := y.AuxInt
-               d := v.AuxInt
-               if !(c == d) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = y.Type
-               v.AddArg(y)
-               return true
-       }
-       return false
-}
-func rewriteValueARM_OpARMANDshiftLLreg(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ANDshiftLLreg (MOVWconst [c]) x y)
-       // cond:
-       // result: (ANDconst [c] (SLL <x.Type> x y))
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMANDconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (ANDshiftLLreg x y (MOVWconst [c]))
+       // match: (AND x (MVN y))
        // cond:
-       // result: (ANDshiftLL x y [c])
+       // result: (BIC x y)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMVN {
                        break
                }
-               c := v_2.AuxInt
-               v.reset(OpARMANDshiftLL)
+               y := v_1.Args[0]
+               v.reset(OpARMBIC)
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMANDshiftRA(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ANDshiftRA (MOVWconst [c]) x [d])
+       // match: (AND x (MVNshiftLL y [c]))
        // cond:
-       // result: (ANDconst [c] (SRAconst <x.Type> x [d]))
+       // result: (BICshiftLL x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMVNshiftLL {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMANDconst)
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMBICshiftLL)
                v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (ANDshiftRA x (MOVWconst [c]) [d])
+       // match: (AND x (MVNshiftRL y [c]))
        // cond:
-       // result: (ANDconst x [int64(int32(c)>>uint64(d))])
+       // result: (BICshiftRL x y [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMMVNshiftRL {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMANDconst)
+               y := v_1.Args[0]
+               v.reset(OpARMBICshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(y)
                return true
        }
-       // match: (ANDshiftRA x y:(SRAconst x [c]) [d])
-       // cond: c==d
-       // result: y
+       // match: (AND x (MVNshiftRA y [c]))
+       // cond:
+       // result: (BICshiftRA x y [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               if y.Op != OpARMSRAconst {
-                       break
-               }
-               if x != y.Args[0] {
-                       break
-               }
-               c := y.AuxInt
-               d := v.AuxInt
-               if !(c == d) {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMVNshiftRA {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = y.Type
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMBICshiftRA)
+               v.AuxInt = c
+               v.AddArg(x)
                v.AddArg(y)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMANDshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ANDshiftRAreg (MOVWconst [c]) x y)
+       // match: (ANDconst [0] _)
        // cond:
-       // result: (ANDconst [c] (SRA <x.Type> x y))
+       // result: (MOVWconst [0])
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (ANDconst [c] x)
+       // cond: int32(c)==-1
+       // result: x
+       for {
+               c := v.AuxInt
+               x := v.Args[0]
+               if !(int32(c) == -1) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (ANDconst [c] (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [c&d])
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMANDconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c & d
                return true
        }
-       // match: (ANDshiftRAreg x y (MOVWconst [c]))
+       // match: (ANDconst [c] (ANDconst [d] x))
        // cond:
-       // result: (ANDshiftRA x y [c])
+       // result: (ANDconst [c&d] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMANDconst {
                        break
                }
-               c := v_2.AuxInt
-               v.reset(OpARMANDshiftRA)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c & d
                v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMANDshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ANDshiftRL (MOVWconst [c]) x [d])
+       // match: (ANDshiftLL (MOVWconst [c]) x [d])
        // cond:
-       // result: (ANDconst [c] (SRLconst <x.Type> x [d]))
+       // result: (ANDconst [c] (SLLconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                v.reset(OpARMANDconst)
                v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-               v0.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
-       // match: (ANDshiftRL x (MOVWconst [c]) [d])
+       // match: (ANDshiftLL x (MOVWconst [c]) [d])
        // cond:
-       // result: (ANDconst x [int64(uint32(c)>>uint64(d))])
+       // result: (ANDconst x [int64(uint32(c)<<uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMANDconst)
+               v.AuxInt = int64(uint32(c) << uint64(d))
                v.AddArg(x)
-               v.AuxInt = int64(uint32(c) >> uint64(d))
                return true
        }
-       // match: (ANDshiftRL x y:(SRLconst x [c]) [d])
+       // match: (ANDshiftLL x y:(SLLconst x [c]) [d])
        // cond: c==d
        // result: y
        for {
+               d := v.AuxInt
                x := v.Args[0]
                y := v.Args[1]
-               if y.Op != OpARMSRLconst {
+               if y.Op != OpARMSLLconst {
                        break
                }
+               c := y.AuxInt
                if x != y.Args[0] {
                        break
                }
-               c := y.AuxInt
-               d := v.AuxInt
                if !(c == d) {
                        break
                }
@@ -3078,12 +2872,12 @@ func rewriteValueARM_OpARMANDshiftRL(v *Value, config *Config) bool {
        }
        return false
 }
-func rewriteValueARM_OpARMANDshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ANDshiftRLreg (MOVWconst [c]) x y)
+       // match: (ANDshiftLLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (ANDconst [c] (SRL <x.Type> x y))
+       // result: (ANDconst [c] (SLL <x.Type> x y))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -3094,15 +2888,15 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value, config *Config) bool {
                y := v.Args[2]
                v.reset(OpARMANDconst)
                v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
-       // match: (ANDshiftRLreg x y (MOVWconst [c]))
+       // match: (ANDshiftLLreg x y (MOVWconst [c]))
        // cond:
-       // result: (ANDshiftRL x y [c])
+       // result: (ANDshiftLL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
@@ -3111,210 +2905,219 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value, config *Config) bool {
                        break
                }
                c := v_2.AuxInt
-               v.reset(OpARMANDshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARMANDshiftLL)
                v.AuxInt = c
-               return true
-       }
-       return false
-}
-func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add16 x y)
-       // cond:
-       // result: (ADD x y)
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMADD)
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32 x y)
-       // cond:
-       // result: (ADD x y)
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMADD)
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32F x y)
-       // cond:
-       // result: (ADDF x y)
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMADDF)
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add32carry x y)
+       // match: (ANDshiftRA (MOVWconst [c]) x [d])
        // cond:
-       // result: (ADDS x y)
+       // result: (ANDconst [c] (SRAconst <x.Type> x [d]))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMADDS)
-               v.AddArg(x)
-               v.AddArg(y)
+               d := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32withcarry x y c)
+       // match: (ANDshiftRA x (MOVWconst [c]) [d])
        // cond:
-       // result: (ADC x y c)
+       // result: (ANDconst x [int64(int32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
-               y := v.Args[1]
-               c := v.Args[2]
-               v.reset(OpARMADC)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMANDconst)
+               v.AuxInt = int64(int32(c) >> uint64(d))
                v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(c)
                return true
        }
-}
-func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add64F x y)
-       // cond:
-       // result: (ADDD x y)
+       // match: (ANDshiftRA x y:(SRAconst x [c]) [d])
+       // cond: c==d
+       // result: y
        for {
+               d := v.AuxInt
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMADDD)
-               v.AddArg(x)
+               if y.Op != OpARMSRAconst {
+                       break
+               }
+               c := y.AuxInt
+               if x != y.Args[0] {
+                       break
+               }
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = y.Type
                v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftRAreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add8 x y)
+       // match: (ANDshiftRAreg (MOVWconst [c]) x y)
        // cond:
-       // result: (ADD x y)
+       // result: (ANDconst [c] (SRA <x.Type> x y))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMADD)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (AddPtr x y)
+       // match: (ANDshiftRAreg x y (MOVWconst [c]))
        // cond:
-       // result: (ADD x y)
+       // result: (ANDshiftRA x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMADD)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMANDshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Addr {sym} base)
+       // match: (ANDshiftRL (MOVWconst [c]) x [d])
        // cond:
-       // result: (MOVWaddr {sym} base)
+       // result: (ANDconst [c] (SRLconst <x.Type> x [d]))
        for {
-               sym := v.Aux
-               base := v.Args[0]
-               v.reset(OpARMMOVWaddr)
-               v.Aux = sym
-               v.AddArg(base)
+               d := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And16 x y)
+       // match: (ANDshiftRL x (MOVWconst [c]) [d])
        // cond:
-       // result: (AND x y)
+       // result: (ANDconst x [int64(uint32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMAND)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMANDconst)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And32 x y)
-       // cond:
-       // result: (AND x y)
+       // match: (ANDshiftRL x y:(SRLconst x [c]) [d])
+       // cond: c==d
+       // result: y
        for {
+               d := v.AuxInt
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMAND)
-               v.AddArg(x)
+               if y.Op != OpARMSRLconst {
+                       break
+               }
+               c := y.AuxInt
+               if x != y.Args[0] {
+                       break
+               }
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = y.Type
                v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMANDshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (And8 x y)
+       // match: (ANDshiftRLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (AND x y)
+       // result: (ANDconst [c] (SRL <x.Type> x y))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMAND)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (AndB x y)
+       // match: (ANDshiftRLreg x y (MOVWconst [c]))
        // cond:
-       // result: (AND x y)
+       // result: (ANDshiftRL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMAND)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMANDshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
+       return false
 }
 func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
        b := v.Block
@@ -3346,9 +3149,9 @@ func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMBICshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (BIC x (SRLconst [c] y))
@@ -3363,9 +3166,9 @@ func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMBICshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (BIC x (SRAconst [c] y))
@@ -3380,9 +3183,9 @@ func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMBICshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (BIC x (SLL y z))
@@ -3501,32 +3304,32 @@ func rewriteValueARM_OpARMBICshiftLL(v *Value, config *Config) bool {
        // cond:
        // result: (BICconst x [int64(uint32(c)<<uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMBICconst)
-               v.AddArg(x)
                v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(x)
                return true
        }
        // match: (BICshiftLL x (SLLconst x [c]) [d])
        // cond: c==d
        // result: (MOVWconst [0])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMSLLconst {
                        break
                }
+               c := v_1.AuxInt
                if x != v_1.Args[0] {
                        break
                }
-               c := v_1.AuxInt
-               d := v.AuxInt
                if !(c == d) {
                        break
                }
@@ -3551,9 +3354,9 @@ func rewriteValueARM_OpARMBICshiftLLreg(v *Value, config *Config) bool {
                }
                c := v_2.AuxInt
                v.reset(OpARMBICshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
@@ -3565,32 +3368,32 @@ func rewriteValueARM_OpARMBICshiftRA(v *Value, config *Config) bool {
        // cond:
        // result: (BICconst x [int64(int32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMBICconst)
-               v.AddArg(x)
                v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
        // match: (BICshiftRA x (SRAconst x [c]) [d])
        // cond: c==d
        // result: (MOVWconst [0])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMSRAconst {
                        break
                }
+               c := v_1.AuxInt
                if x != v_1.Args[0] {
                        break
                }
-               c := v_1.AuxInt
-               d := v.AuxInt
                if !(c == d) {
                        break
                }
@@ -3615,9 +3418,9 @@ func rewriteValueARM_OpARMBICshiftRAreg(v *Value, config *Config) bool {
                }
                c := v_2.AuxInt
                v.reset(OpARMBICshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
@@ -3629,32 +3432,32 @@ func rewriteValueARM_OpARMBICshiftRL(v *Value, config *Config) bool {
        // cond:
        // result: (BICconst x [int64(uint32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMBICconst)
-               v.AddArg(x)
                v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
        // match: (BICshiftRL x (SRLconst x [c]) [d])
        // cond: c==d
        // result: (MOVWconst [0])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMSRLconst {
                        break
                }
+               c := v_1.AuxInt
                if x != v_1.Args[0] {
                        break
                }
-               c := v_1.AuxInt
-               d := v.AuxInt
                if !(c == d) {
                        break
                }
@@ -3679,9 +3482,9 @@ func rewriteValueARM_OpARMBICshiftRLreg(v *Value, config *Config) bool {
                }
                c := v_2.AuxInt
                v.reset(OpARMBICshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
@@ -3693,11 +3496,11 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
        // cond:
        // result: (MOVWconst [c])
        for {
+               c := v.AuxInt
                v_1 := v.Args[1]
                if v_1.Op != OpARMFlagEQ {
                        break
                }
-               c := v.AuxInt
                v.reset(OpARMMOVWconst)
                v.AuxInt = c
                return true
@@ -3720,11 +3523,11 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
        // cond:
        // result: (MOVWconst [c])
        for {
+               c := v.AuxInt
                v_1 := v.Args[1]
                if v_1.Op != OpARMFlagLT_UGT {
                        break
                }
-               c := v.AuxInt
                v.reset(OpARMMOVWconst)
                v.AuxInt = c
                return true
@@ -3747,11 +3550,11 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
        // cond:
        // result: (MOVWconst [c])
        for {
+               c := v.AuxInt
                v_1 := v.Args[1]
                if v_1.Op != OpARMFlagGT_UGT {
                        break
                }
-               c := v.AuxInt
                v.reset(OpARMMOVWconst)
                v.AuxInt = c
                return true
@@ -3760,17 +3563,17 @@ func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
        // cond:
        // result: (CMOVWLSconst x flags [c])
        for {
+               c := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMInvertFlags {
                        break
                }
                flags := v_1.Args[0]
-               c := v.AuxInt
                v.reset(OpARMCMOVWLSconst)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(flags)
-               v.AuxInt = c
                return true
        }
        return false
@@ -3782,11 +3585,11 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
        // cond:
        // result: (MOVWconst [c])
        for {
+               c := v.AuxInt
                v_1 := v.Args[1]
                if v_1.Op != OpARMFlagEQ {
                        break
                }
-               c := v.AuxInt
                v.reset(OpARMMOVWconst)
                v.AuxInt = c
                return true
@@ -3795,11 +3598,11 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
        // cond:
        // result: (MOVWconst [c])
        for {
+               c := v.AuxInt
                v_1 := v.Args[1]
                if v_1.Op != OpARMFlagLT_ULT {
                        break
                }
-               c := v.AuxInt
                v.reset(OpARMMOVWconst)
                v.AuxInt = c
                return true
@@ -3822,11 +3625,11 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
        // cond:
        // result: (MOVWconst [c])
        for {
+               c := v.AuxInt
                v_1 := v.Args[1]
                if v_1.Op != OpARMFlagGT_ULT {
                        break
                }
-               c := v.AuxInt
                v.reset(OpARMMOVWconst)
                v.AuxInt = c
                return true
@@ -3849,17 +3652,17 @@ func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
        // cond:
        // result: (CMOVWHSconst x flags [c])
        for {
+               c := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMInvertFlags {
                        break
                }
                flags := v_1.Args[0]
-               c := v.AuxInt
                v.reset(OpARMCMOVWHSconst)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(flags)
-               v.AuxInt = c
                return true
        }
        return false
@@ -3911,9 +3714,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMCMPshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (CMP (SLLconst [c] y) x)
@@ -3929,9 +3732,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
                x := v.Args[1]
                v.reset(OpARMInvertFlags)
                v0 := b.NewValue0(v.Line, OpARMCMPshiftLL, TypeFlags)
+               v0.AuxInt = c
                v0.AddArg(x)
                v0.AddArg(y)
-               v0.AuxInt = c
                v.AddArg(v0)
                return true
        }
@@ -3947,9 +3750,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMCMPshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (CMP (SRLconst [c] y) x)
@@ -3965,9 +3768,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
                x := v.Args[1]
                v.reset(OpARMInvertFlags)
                v0 := b.NewValue0(v.Line, OpARMCMPshiftRL, TypeFlags)
+               v0.AuxInt = c
                v0.AddArg(x)
                v0.AddArg(y)
-               v0.AuxInt = c
                v.AddArg(v0)
                return true
        }
@@ -3983,9 +3786,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
                c := v_1.AuxInt
                y := v_1.Args[0]
                v.reset(OpARMCMPshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        // match: (CMP (SRAconst [c] y) x)
@@ -4001,9 +3804,9 @@ func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
                x := v.Args[1]
                v.reset(OpARMInvertFlags)
                v0 := b.NewValue0(v.Line, OpARMCMPshiftRA, TypeFlags)
+               v0.AuxInt = c
                v0.AddArg(x)
                v0.AddArg(y)
-               v0.AuxInt = c
                v.AddArg(v0)
                return true
        }
@@ -4166,12 +3969,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
        // cond: int32(x)==int32(y)
        // result: (FlagEQ)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int32(x) == int32(y)) {
                        break
                }
@@ -4182,12 +3985,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
        // cond: int32(x)<int32(y) && uint32(x)<uint32(y)
        // result: (FlagLT_ULT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
                        break
                }
@@ -4198,12 +4001,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
        // cond: int32(x)<int32(y) && uint32(x)>uint32(y)
        // result: (FlagLT_UGT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
                        break
                }
@@ -4214,12 +4017,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
        // cond: int32(x)>int32(y) && uint32(x)<uint32(y)
        // result: (FlagGT_ULT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
                        break
                }
@@ -4230,12 +4033,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
        // cond: int32(x)>int32(y) && uint32(x)>uint32(y)
        // result: (FlagGT_UGT)
        for {
+               y := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                x := v_0.AuxInt
-               y := v.AuxInt
                if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
                        break
                }
@@ -4246,11 +4049,11 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
        // cond: 0xff < c
        // result: (FlagLT_ULT)
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVBUreg {
                        break
                }
-               c := v.AuxInt
                if !(0xff < c) {
                        break
                }
@@ -4261,11 +4064,11 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
        // cond: 0xffff < c
        // result: (FlagLT_ULT)
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVHUreg {
                        break
                }
-               c := v.AuxInt
                if !(0xffff < c) {
                        break
                }
@@ -4276,12 +4079,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
        // cond: 0 <= int32(m) && int32(m) < int32(n)
        // result: (FlagLT_ULT)
        for {
+               n := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMANDconst {
                        break
                }
                m := v_0.AuxInt
-               n := v.AuxInt
                if !(0 <= int32(m) && int32(m) < int32(n)) {
                        break
                }
@@ -4292,12 +4095,12 @@ func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
        // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)
        // result: (FlagLT_ULT)
        for {
+               n := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMSRLconst {
                        break
                }
                c := v_0.AuxInt
-               n := v.AuxInt
                if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) {
                        break
                }
@@ -4313,19 +4116,19 @@ func rewriteValueARM_OpARMCMPshiftLL(v *Value, config *Config) bool {
        // cond:
        // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                v.reset(OpARMInvertFlags)
                v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
                v0.AuxInt = c
                v1 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v1.AddArg(x)
                v1.AuxInt = d
+               v1.AddArg(x)
                v0.AddArg(v1)
                v.AddArg(v0)
                return true
@@ -4334,16 +4137,16 @@ func rewriteValueARM_OpARMCMPshiftLL(v *Value, config *Config) bool {
        // cond:
        // result: (CMPconst x [int64(uint32(c)<<uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMCMPconst)
-               v.AddArg(x)
                v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(x)
                return true
        }
        return false
@@ -4384,9 +4187,9 @@ func rewriteValueARM_OpARMCMPshiftLLreg(v *Value, config *Config) bool {
                }
                c := v_2.AuxInt
                v.reset(OpARMCMPshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
@@ -4398,19 +4201,19 @@ func rewriteValueARM_OpARMCMPshiftRA(v *Value, config *Config) bool {
        // cond:
        // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                v.reset(OpARMInvertFlags)
                v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
                v0.AuxInt = c
                v1 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-               v1.AddArg(x)
                v1.AuxInt = d
+               v1.AddArg(x)
                v0.AddArg(v1)
                v.AddArg(v0)
                return true
@@ -4419,16 +4222,16 @@ func rewriteValueARM_OpARMCMPshiftRA(v *Value, config *Config) bool {
        // cond:
        // result: (CMPconst x [int64(int32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMCMPconst)
-               v.AddArg(x)
                v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
        return false
@@ -4469,9 +4272,9 @@ func rewriteValueARM_OpARMCMPshiftRAreg(v *Value, config *Config) bool {
                }
                c := v_2.AuxInt
                v.reset(OpARMCMPshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
@@ -4483,19 +4286,19 @@ func rewriteValueARM_OpARMCMPshiftRL(v *Value, config *Config) bool {
        // cond:
        // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
                v.reset(OpARMInvertFlags)
                v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
                v0.AuxInt = c
                v1 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-               v1.AddArg(x)
                v1.AuxInt = d
+               v1.AddArg(x)
                v0.AddArg(v1)
                v.AddArg(v0)
                return true
@@ -4504,16 +4307,16 @@ func rewriteValueARM_OpARMCMPshiftRL(v *Value, config *Config) bool {
        // cond:
        // result: (CMPconst x [int64(uint32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
                v.reset(OpARMCMPconst)
-               v.AddArg(x)
                v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
        return false
@@ -4554,706 +4357,680 @@ func rewriteValueARM_OpARMCMPshiftRLreg(v *Value, config *Config) bool {
                }
                c := v_2.AuxInt
                v.reset(OpARMCMPshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
 }
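
Nothing in the `-` bodies below is lost: rewriteValueARM_OpClosureCall and the other generic lowerings removed here reappear elsewhere in the new file order, and the `+` bodies are ARM rules moving up from further down (the later hunk that removes `-func rewriteValueARM_OpARMDIV` is the same function arriving here). One way to read the new interleaving is as a plain string sort of arch-qualified op names; the following uses throwaway strings, not the generator's real data structures.

    package main

    import (
            "fmt"
            "sort"
    )

    func main() {
            // Illustration only: sorting bare op names vs. arch-qualified
            // "Op..." names interleaves generic and ARM-specific functions
            // differently, matching the function moves in this diff.
            bare := []string{"ClosureCall", "Com16", "DIV", "DIVU"}
            qualified := []string{"OpClosureCall", "OpCom16", "OpARMDIV", "OpARMDIVU"}
            sort.Strings(bare)      // [ClosureCall Com16 DIV DIVU]
            sort.Strings(qualified) // [OpARMDIV OpARMDIVU OpClosureCall OpCom16]
            fmt.Println(bare, qualified)
    }
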
-func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMDIV(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ClosureCall [argwid] entry closure mem)
+       // match: (DIV (MOVWconst [c]) (MOVWconst [d]))
        // cond:
-       // result: (CALLclosure [argwid] entry closure mem)
+       // result: (MOVWconst [int64(int32(c)/int32(d))])
        for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               closure := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMCALLclosure)
-               v.AuxInt = argwid
-               v.AddArg(entry)
-               v.AddArg(closure)
-               v.AddArg(mem)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int32(c) / int32(d))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMDIVU(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Com16 x)
+       // match: (DIVU x (MOVWconst [1]))
        // cond:
-       // result: (MVN x)
+       // result: x
        for {
                x := v.Args[0]
-               v.reset(OpARMMVN)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               if v_1.AuxInt != 1 {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com32 x)
-       // cond:
-       // result: (MVN x)
+       // match: (DIVU x (MOVWconst [c]))
+       // cond: isPowerOfTwo(c)
+       // result: (SRLconst [log2(c)] x)
        for {
                x := v.Args[0]
-               v.reset(OpARMMVN)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARMSRLconst)
+               v.AuxInt = log2(c)
                v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com8 x)
+       // match: (DIVU (MOVWconst [c]) (MOVWconst [d]))
        // cond:
-       // result: (MVN x)
+       // result: (MOVWconst [int64(uint32(c)/uint32(d))])
        for {
-               x := v.Args[0]
-               v.reset(OpARMMVN)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_1.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(uint32(c) / uint32(d))
                return true
        }
+       return false
 }
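
The middle DIVU rule above is a strength reduction: an unsigned divide by a power-of-two constant becomes a logical right shift. A plain-Go restatement follows; the isPowerOfTwo and log2 helpers here are local stand-ins for the ones the rule references, assumed to have the obvious meaning.

    package main

    import "fmt"

    func isPowerOfTwo(c int64) bool { return c > 0 && c&(c-1) == 0 }

    func log2(c int64) int64 {
            var n int64
            for c > 1 {
                    c >>= 1
                    n++
            }
            return n
    }

    func main() {
            x, c := uint32(1000), int64(8)
            if isPowerOfTwo(c) {
                    // For unsigned x and power-of-two c, x/c == x>>log2(c).
                    fmt.Println(x/uint32(c) == x>>uint32(log2(c))) // true
            }
    }
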
-func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Const16 [val])
+       // match: (Equal (FlagEQ))
        // cond:
-       // result: (MOVWconst [val])
+       // result: (MOVWconst [1])
        for {
-               val := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
                v.reset(OpARMMOVWconst)
-               v.AuxInt = val
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const32 [val])
+       // match: (Equal (FlagLT_ULT))
        // cond:
-       // result: (MOVWconst [val])
+       // result: (MOVWconst [0])
        for {
-               val := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
                v.reset(OpARMMOVWconst)
-               v.AuxInt = val
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const32F [val])
+       // match: (Equal (FlagLT_UGT))
        // cond:
-       // result: (MOVFconst [val])
+       // result: (MOVWconst [0])
        for {
-               val := v.AuxInt
-               v.reset(OpARMMOVFconst)
-               v.AuxInt = val
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const64F [val])
+       // match: (Equal (FlagGT_ULT))
        // cond:
-       // result: (MOVDconst [val])
+       // result: (MOVWconst [0])
        for {
-               val := v.AuxInt
-               v.reset(OpARMMOVDconst)
-               v.AuxInt = val
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const8 [val])
+       // match: (Equal (FlagGT_UGT))
        // cond:
-       // result: (MOVWconst [val])
+       // result: (MOVWconst [0])
        for {
-               val := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
                v.reset(OpARMMOVWconst)
-               v.AuxInt = val
+               v.AuxInt = 0
+               return true
+       }
+       // match: (Equal (InvertFlags x))
+       // cond:
+       // result: (Equal x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMEqual)
+               v.AddArg(x)
                return true
        }
+       return false
 }
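
Taken together, the Equal rules in the block above fold a comparison whose flags are statically known: only FlagEQ yields 1, the four inequality flag constants yield 0, and InvertFlags passes through because equality does not depend on operand order. Condensed into a lookup table (names only, for reading the generated rules; not code from the compiler):

    package main

    import "fmt"

    func main() {
            // Equal(flag) result for each statically-known flag constant,
            // as the rules above encode it.
            equalOf := map[string]int64{
                    "FlagEQ":     1,
                    "FlagLT_ULT": 0,
                    "FlagLT_UGT": 0,
                    "FlagGT_ULT": 0,
                    "FlagGT_UGT": 0,
            }
            fmt.Println(equalOf["FlagEQ"], equalOf["FlagGT_UGT"]) // 1 0
    }
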
-func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMGreaterEqual(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ConstBool [b])
+       // match: (GreaterEqual (FlagEQ))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterEqual (FlagLT_ULT))
        // cond:
-       // result: (MOVWconst [b])
+       // result: (MOVWconst [0])
        for {
-               b := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
                v.reset(OpARMMOVWconst)
-               v.AuxInt = b
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ConstNil)
+       // match: (GreaterEqual (FlagLT_UGT))
        // cond:
        // result: (MOVWconst [0])
        for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
                v.reset(OpARMMOVWconst)
                v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Convert x mem)
+       // match: (GreaterEqual (FlagGT_ULT))
        // cond:
-       // result: (MOVWconvert x mem)
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVWconvert)
-               v.AddArg(x)
-               v.AddArg(mem)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Fto32 x)
+       // match: (GreaterEqual (FlagGT_UGT))
        // cond:
-       // result: (MOVFW x)
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVFW)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Fto32U x)
+       // match: (GreaterEqual (InvertFlags x))
        // cond:
-       // result: (MOVFWU x)
+       // result: (LessEqual x)
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVFWU)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMLessEqual)
                v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMGreaterEqualU(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32Fto64F x)
+       // match: (GreaterEqualU (FlagEQ))
        // cond:
-       // result: (MOVFD x)
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVFD)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Uto32F x)
+       // match: (GreaterEqualU (FlagLT_ULT))
        // cond:
-       // result: (MOVWUF x)
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVWUF)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32Uto64F x)
+       // match: (GreaterEqualU (FlagLT_UGT))
        // cond:
-       // result: (MOVWUD x)
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVWUD)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32to32F x)
+       // match: (GreaterEqualU (FlagGT_ULT))
        // cond:
-       // result: (MOVWF x)
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVWF)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt32to64F x)
+       // match: (GreaterEqualU (FlagGT_UGT))
        // cond:
-       // result: (MOVWD x)
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVWD)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto32 x)
+       // match: (GreaterEqualU (InvertFlags x))
        // cond:
-       // result: (MOVDW x)
+       // result: (LessEqualU x)
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVDW)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMLessEqualU)
                v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMGreaterThan(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt64Fto32F x)
+       // match: (GreaterThan (FlagEQ))
        // cond:
-       // result: (MOVDF x)
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVDF)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Cvt64Fto32U x)
+       // match: (GreaterThan (FlagLT_ULT))
        // cond:
-       // result: (MOVDWU x)
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               v.reset(OpARMMOVDWU)
-               v.AddArg(x)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpARMDIV(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (DIV (MOVWconst [c]) (MOVWconst [d]))
+       // match: (GreaterThan (FlagLT_UGT))
        // cond:
-       // result: (MOVWconst [int64(int32(c)/int32(d))])
+       // result: (MOVWconst [0])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMFlagLT_UGT {
                        break
                }
-               c := v_0.AuxInt
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (GreaterThan (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
                        break
                }
-               d := v_1.AuxInt
                v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int32(c) / int32(d))
+               v.AuxInt = 1
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMDIVU(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (DIVU x (MOVWconst [1]))
+       // match: (GreaterThan (FlagGT_UGT))
        // cond:
-       // result: x
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
                        break
                }
-               if v_1.AuxInt != 1 {
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (GreaterThan (InvertFlags x))
+       // cond:
+       // result: (LessThan x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               x := v_0.Args[0]
+               v.reset(OpARMLessThan)
                v.AddArg(x)
                return true
        }
-       // match: (DIVU x (MOVWconst [c]))
-       // cond: isPowerOfTwo(c)
-       // result: (SRLconst [log2(c)] x)
+       return false
+}
+func rewriteValueARM_OpARMGreaterThanU(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GreaterThanU (FlagEQ))
+       // cond:
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(isPowerOfTwo(c)) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
                        break
                }
-               v.reset(OpARMSRLconst)
-               v.AuxInt = log2(c)
-               v.AddArg(x)
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (DIVU (MOVWconst [c]) (MOVWconst [d]))
+       // match: (GreaterThanU (FlagLT_ULT))
        // cond:
-       // result: (MOVWconst [int64(uint32(c)/uint32(d))])
+       // result: (MOVWconst [0])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMFlagLT_ULT {
                        break
                }
-               d := v_1.AuxInt
                v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(uint32(c) / uint32(d))
+               v.AuxInt = 0
                return true
        }
-       return false
-}
-func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (DeferCall [argwid] mem)
+       // match: (GreaterThanU (FlagLT_UGT))
        // cond:
-       // result: (CALLdefer [argwid] mem)
+       // result: (MOVWconst [1])
        for {
-               argwid := v.AuxInt
-               mem := v.Args[0]
-               v.reset(OpARMCALLdefer)
-               v.AuxInt = argwid
-               v.AddArg(mem)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div16 x y)
+       // match: (GreaterThanU (FlagGT_ULT))
        // cond:
-       // result: (DIV (SignExt16to32 x) (SignExt16to32 y))
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIV)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div16u x y)
+       // match: (GreaterThanU (FlagGT_UGT))
        // cond:
-       // result: (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIVU)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32 x y)
+       // match: (GreaterThanU (InvertFlags x))
        // cond:
-       // result: (DIV x y)
+       // result: (LessThanU x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIV)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMLessThanU)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMLessEqual(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div32F x y)
+       // match: (LessEqual (FlagEQ))
        // cond:
-       // result: (DIVF x y)
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIVF)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div32u x y)
+       // match: (LessEqual (FlagLT_ULT))
        // cond:
-       // result: (DIVU x y)
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIVU)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div64F x y)
+       // match: (LessEqual (FlagLT_UGT))
        // cond:
-       // result: (DIVD x y)
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIVD)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div8 x y)
+       // match: (LessEqual (FlagGT_ULT))
        // cond:
-       // result: (DIV (SignExt8to32 x) (SignExt8to32 y))
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIV)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Div8u x y)
+       // match: (LessEqual (FlagGT_UGT))
        // cond:
-       // result: (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMDIVU)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq16 x y)
+       // match: (LessEqual (InvertFlags x))
        // cond:
-       // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (GreaterEqual x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMGreaterEqual)
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMLessEqualU(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq32 x y)
+       // match: (LessEqualU (FlagEQ))
        // cond:
-       // result: (Equal (CMP x y))
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq32F x y)
+       // match: (LessEqualU (FlagLT_ULT))
        // cond:
-       // result: (Equal (CMPF x y))
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq64F x y)
+       // match: (LessEqualU (FlagLT_UGT))
        // cond:
-       // result: (Equal (CMPD x y))
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Eq8 x y)
+       // match: (LessEqualU (FlagGT_ULT))
        // cond:
-       // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (EqB x y)
+       // match: (LessEqualU (FlagGT_UGT))
        // cond:
-       // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMXORconst)
-               v.AuxInt = 1
-               v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeBool())
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (EqPtr x y)
+       // match: (LessEqualU (InvertFlags x))
        // cond:
-       // result: (Equal (CMP x y))
+       // result: (GreaterEqualU x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMGreaterEqualU)
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMLessThan(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Equal (FlagEQ))
+       // match: (LessThan (FlagEQ))
        // cond:
-       // result: (MOVWconst [1])
+       // result: (MOVWconst [0])
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMFlagEQ {
                        break
                }
                v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               v.AuxInt = 0
                return true
        }
-       // match: (Equal (FlagLT_ULT))
+       // match: (LessThan (FlagLT_ULT))
        // cond:
-       // result: (MOVWconst [0])
+       // result: (MOVWconst [1])
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMFlagLT_ULT {
                        break
                }
                v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               v.AuxInt = 1
                return true
        }
-       // match: (Equal (FlagLT_UGT))
+       // match: (LessThan (FlagLT_UGT))
        // cond:
-       // result: (MOVWconst [0])
+       // result: (MOVWconst [1])
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMFlagLT_UGT {
                        break
                }
                v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               v.AuxInt = 1
                return true
        }
-       // match: (Equal (FlagGT_ULT))
+       // match: (LessThan (FlagGT_ULT))
        // cond:
        // result: (MOVWconst [0])
        for {
@@ -5265,7 +5042,7 @@ func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
                v.AuxInt = 0
                return true
        }
-       // match: (Equal (FlagGT_UGT))
+       // match: (LessThan (FlagGT_UGT))
        // cond:
        // result: (MOVWconst [0])
        for {
@@ -5277,4167 +5054,4377 @@ func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
                v.AuxInt = 0
                return true
        }
-       // match: (Equal (InvertFlags x))
+       // match: (LessThan (InvertFlags x))
        // cond:
-       // result: (Equal x)
+       // result: (GreaterThan x)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMInvertFlags {
                        break
                }
                x := v_0.Args[0]
-               v.reset(OpARMEqual)
+               v.reset(OpARMGreaterThan)
                v.AddArg(x)
                return true
        }
        return false
 }
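
Every comparison op in these hunks carries an InvertFlags rule, and collectively they form a mirror table: inverting the flags swaps each ordered predicate for the one with its operands reversed, while Equal maps to itself. A compact restatement of the pairs visible in this diff (plain strings, not compiler code):

    package main

    import "fmt"

    func main() {
            // Predicate applied to (InvertFlags x) -> predicate applied to x,
            // as the rules above encode it.
            inverted := map[string]string{
                    "Equal":         "Equal",
                    "LessThan":      "GreaterThan",
                    "LessThanU":     "GreaterThanU",
                    "LessEqual":     "GreaterEqual",
                    "LessEqualU":    "GreaterEqualU",
                    "GreaterThan":   "LessThan",
                    "GreaterThanU":  "LessThanU",
                    "GreaterEqual":  "LessEqual",
                    "GreaterEqualU": "LessEqualU",
            }
            fmt.Println(inverted["LessThan"]) // GreaterThan
    }
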
-func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMLessThanU(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq16 x y)
+       // match: (LessThanU (FlagEQ))
        // cond:
-       // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagEQ {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq16U x y)
+       // match: (LessThanU (FlagLT_ULT))
        // cond:
-       // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (MOVWconst [1])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-}
-func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq32 x y)
+       // match: (LessThanU (FlagLT_UGT))
        // cond:
-       // result: (GreaterEqual (CMP x y))
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
+               return true
+       }
+       // match: (LessThanU (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_ULT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
+               return true
+       }
+       // match: (LessThanU (FlagGT_UGT))
+       // cond:
+       // result: (MOVWconst [0])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagGT_UGT {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq32F x y)
+       // match: (LessThanU (InvertFlags x))
        // cond:
-       // result: (GreaterEqual (CMPF x y))
+       // result: (GreaterThanU x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
+                       break
+               }
+               x := v_0.Args[0]
+               v.reset(OpARMGreaterThanU)
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq32U x y)
+       // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (GreaterEqualU (CMP x y))
+       // result: (MOVBUload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVBUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq64F x y)
-       // cond:
-       // result: (GreaterEqual (CMPD x y))
+       // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVBUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Geq8 x y)
-       // cond:
-       // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+       // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
+       // result: x
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVBstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
+       return false
 }
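
The first two MOVBUload rules above fold addressing arithmetic into the load itself: a constant pointer adjustment is absorbed into the load's offset, and a symbol address is absorbed when canMergeSym allows it. A sketch of the offset fold using a hypothetical miniature address type, not the real ssa package:

    package main

    import "fmt"

    type addr struct {
            base string
            off  int64
    }

    // foldADDconst mirrors the first MOVBUload rule above: fold a constant
    // pointer adjustment into the load's own offset.
    func foldADDconst(loadOff int64, p addr) addr {
            return addr{base: p.base, off: loadOff + p.off}
    }

    func main() {
            ptr := addr{base: "R1", off: 4}   // stands in for (ADDconst [4] R1)
            fmt.Println(foldADDconst(8, ptr)) // {R1 12}, i.e. MOVBUload [12] {sym} R1 mem
    }
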
-func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVBUreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq8U x y)
+       // match: (MOVBUreg x:(MOVBUload _ _))
        // cond:
-       // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (MOVWreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x.Op != OpARMMOVBUload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (GetClosurePtr)
+       // match: (MOVBUreg (ANDconst [c] x))
        // cond:
-       // result: (LoweredGetClosurePtr)
+       // result: (ANDconst [c&0xff] x)
        for {
-               v.reset(OpARMLoweredGetClosurePtr)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMANDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c & 0xff
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpGoCall(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (GoCall [argwid] mem)
+       // match: (MOVBUreg x:(MOVBUreg _))
        // cond:
-       // result: (CALLgo [argwid] mem)
+       // result: (MOVWreg x)
        for {
-               argwid := v.AuxInt
-               mem := v.Args[0]
-               v.reset(OpARMCALLgo)
-               v.AuxInt = argwid
-               v.AddArg(mem)
+               x := v.Args[0]
+               if x.Op != OpARMMOVBUreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater16 x y)
+       // match: (MOVBUreg (MOVWconst [c]))
        // cond:
-       // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (MOVWconst [int64(uint8(c))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(uint8(c))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater16U x y)
+       // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (MOVBload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater32 x y)
-       // cond:
-       // result: (GreaterThan (CMP x y))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+       // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater32F x y)
-       // cond:
-       // result: (GreaterThan (CMPF x y))
+       // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
+       // result: x
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVBstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
+       return false
 }
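
The third rule in both the MOVBUload and MOVBload hunks forwards a just-stored byte to its reload, but only when the signedness of the stored value matches the load (!isSigned for the unsigned load, isSigned for the signed one). A small example of why the guard is needed: forwarding across a signedness change would skip a required re-extension.

    package main

    import "fmt"

    func main() {
            var mem [1]byte
            x := int8(-5)
            mem[0] = byte(x)               // MOVBstore of a signed byte
            fmt.Println(uint8(mem[0]))     // 251: an unsigned reload must re-extend
            fmt.Println(int8(mem[0]) == x) // true: a signed reload can take x directly
    }
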
-func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVBreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater32U x y)
+       // match: (MOVBreg x:(MOVBload _ _))
        // cond:
-       // result: (GreaterThanU (CMP x y))
+       // result: (MOVWreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if x.Op != OpARMMOVBload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater64F x y)
-       // cond:
-       // result: (GreaterThan (CMPD x y))
+       // match: (MOVBreg (ANDconst [c] x))
+       // cond: c & 0x80 == 0
+       // result: (ANDconst [c&0x7f] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMANDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(c&0x80 == 0) {
+                       break
+               }
+               v.reset(OpARMANDconst)
+               v.AuxInt = c & 0x7f
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater8 x y)
+       // match: (MOVBreg x:(MOVBreg _))
        // cond:
-       // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+       // result: (MOVWreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x.Op != OpARMMOVBreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Greater8U x y)
+       // match: (MOVBreg (MOVWconst [c]))
        // cond:
-       // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (MOVWconst [int64(int8(c))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int8(c))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpARMGreaterEqual(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (GreaterEqual (FlagEQ))
+       // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (MOVBstore [off1+off2] {sym} ptr val mem)
        for {
+               off1 := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               if v_0.Op != OpARMADDconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterEqual (FlagLT_ULT))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               if v_0.Op != OpARMMOVWaddr {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterEqual (FlagLT_UGT))
+       // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVBreg {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterEqual (FlagGT_ULT))
+       // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVBUreg {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterEqual (FlagGT_UGT))
+       // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVHreg {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterEqual (InvertFlags x))
+       // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
        // cond:
-       // result: (LessEqual x)
+       // result: (MOVBstore [off] {sym} ptr x mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVHUreg {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMLessEqual)
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
                v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMGreaterEqualU(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVDload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (GreaterEqualU (FlagEQ))
-       // cond:
-       // result: (MOVWconst [1])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
-               return true
-       }
-       // match: (GreaterEqualU (FlagLT_ULT))
+       // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (MOVDload [off1+off2] {sym} ptr mem)
        for {
+               off1 := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               if v_0.Op != OpARMADDconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterEqualU (FlagLT_UGT))
-       // cond:
-       // result: (MOVWconst [1])
+       // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               if v_0.Op != OpARMMOVWaddr {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
-               return true
-       }
-       // match: (GreaterEqualU (FlagGT_ULT))
-       // cond:
-       // result: (MOVWconst [0])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               v.reset(OpARMMOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterEqualU (FlagGT_UGT))
-       // cond:
-       // result: (MOVWconst [1])
+       // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVDstore {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
-               return true
-       }
-       // match: (GreaterEqualU (InvertFlags x))
-       // cond:
-       // result: (LessEqualU x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMLessEqualU)
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMGreaterThan(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVDstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (GreaterThan (FlagEQ))
+       // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (MOVDstore [off1+off2] {sym} ptr val mem)
        for {
+               off1 := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               if v_0.Op != OpARMADDconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterThan (FlagLT_ULT))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               if v_0.Op != OpARMMOVWaddr {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
-               return true
-       }
-       // match: (GreaterThan (FlagLT_UGT))
-       // cond:
-       // result: (MOVWconst [0])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               v.reset(OpARMMOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterThan (FlagGT_ULT))
+       return false
+}
+func rewriteValueARM_OpARMMOVFload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (MOVFload [off1+off2] {sym} ptr mem)
        for {
+               off1 := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               if v_0.Op != OpARMADDconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVFload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterThan (FlagGT_UGT))
-       // cond:
-       // result: (MOVWconst [1])
+       // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               if v_0.Op != OpARMMOVWaddr {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVFload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterThan (InvertFlags x))
-       // cond:
-       // result: (LessThan x)
+       // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVFstore {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMLessThan)
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMGreaterThanU(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVFstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (GreaterThanU (FlagEQ))
+       // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (MOVFstore [off1+off2] {sym} ptr val mem)
        for {
+               off1 := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               if v_0.Op != OpARMADDconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVFstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterThanU (FlagLT_ULT))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               if v_0.Op != OpARMMOVWaddr {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
-               return true
-       }
-       // match: (GreaterThanU (FlagLT_UGT))
-       // cond:
-       // result: (MOVWconst [1])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               v.reset(OpARMMOVFstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterThanU (FlagGT_ULT))
+       return false
+}
+func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (MOVHUload [off1+off2] {sym} ptr mem)
        for {
+               off1 := v.AuxInt
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               if v_0.Op != OpARMADDconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVHUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterThanU (FlagGT_UGT))
-       // cond:
-       // result: (MOVWconst [1])
+       // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               if v_0.Op != OpARMMOVWaddr {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVHUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (GreaterThanU (InvertFlags x))
-       // cond:
-       // result: (LessThanU x)
+       // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
+       // result: x
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVHstore {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMLessThanU)
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul16 x y)
-       // cond:
-       // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRAconst)
-               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v.AuxInt = 16
-               return true
-       }
-}
-func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul16u x y)
-       // cond:
-       // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRLconst)
-               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v.AuxInt = 16
-               return true
-       }
-}
-func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVHUreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul32 x y)
+       // match: (MOVHUreg x:(MOVBUload _ _))
        // cond:
-       // result: (HMUL x y)
+       // result: (MOVWreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMHMUL)
+               if x.Op != OpARMMOVBUload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul32u x y)
+       // match: (MOVHUreg x:(MOVHUload _ _))
        // cond:
-       // result: (HMULU x y)
+       // result: (MOVWreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMHMULU)
+               if x.Op != OpARMMOVHUload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
                v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul8 x y)
-       // cond:
-       // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRAconst)
-               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v.AuxInt = 8
-               return true
-       }
-}
-func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Hmul8u x y)
-       // cond:
-       // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSRLconst)
-               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v.AuxInt = 8
                return true
        }
-}
-func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (InterCall [argwid] entry mem)
+       // match: (MOVHUreg (ANDconst [c] x))
        // cond:
-       // result: (CALLinter [argwid] entry mem)
+       // result: (ANDconst [c&0xffff] x)
        for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMCALLinter)
-               v.AuxInt = argwid
-               v.AddArg(entry)
-               v.AddArg(mem)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMANDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMANDconst)
+               v.AuxInt = c & 0xffff
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (IsInBounds idx len)
+       // match: (MOVHUreg x:(MOVBUreg _))
        // cond:
-       // result: (LessThanU (CMP idx len))
+       // result: (MOVWreg x)
        for {
-               idx := v.Args[0]
-               len := v.Args[1]
-               v.reset(OpARMLessThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(idx)
-               v0.AddArg(len)
-               v.AddArg(v0)
+               x := v.Args[0]
+               if x.Op != OpARMMOVBUreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (IsNonNil ptr)
+       // match: (MOVHUreg x:(MOVHUreg _))
        // cond:
-       // result: (NotEqual (CMPconst [0] ptr))
+       // result: (MOVWreg x)
        for {
-               ptr := v.Args[0]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v0.AuxInt = 0
-               v0.AddArg(ptr)
-               v.AddArg(v0)
+               x := v.Args[0]
+               if x.Op != OpARMMOVHUreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (IsSliceInBounds idx len)
+       // match: (MOVHUreg (MOVWconst [c]))
        // cond:
-       // result: (LessEqualU (CMP idx len))
+       // result: (MOVWconst [int64(uint16(c))])
        for {
-               idx := v.Args[0]
-               len := v.Args[1]
-               v.reset(OpARMLessEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(idx)
-               v0.AddArg(len)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(uint16(c))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq16 x y)
+       // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (MOVHload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVHload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq16U x y)
-       // cond:
-       // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVHload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq32 x y)
-       // cond:
-       // result: (LessEqual (CMP x y))
+       // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
+       // result: x
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVHstore {
+                       break
+               }
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVHreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq32F x y)
+       // match: (MOVHreg x:(MOVBload _ _))
        // cond:
-       // result: (GreaterEqual (CMPF y x))
+       // result: (MOVWreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               if x.Op != OpARMMOVBload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq32U x y)
+       // match: (MOVHreg x:(MOVBUload _ _))
        // cond:
-       // result: (LessEqualU (CMP x y))
+       // result: (MOVWreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if x.Op != OpARMMOVBUload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq64F x y)
+       // match: (MOVHreg x:(MOVHload _ _))
        // cond:
-       // result: (GreaterEqual (CMPD y x))
+       // result: (MOVWreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               if x.Op != OpARMMOVHload {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq8 x y)
-       // cond:
-       // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+       // match: (MOVHreg (ANDconst [c] x))
+       // cond: c & 0x8000 == 0
+       // result: (ANDconst [c&0x7fff] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMANDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               if !(c&0x8000 == 0) {
+                       break
+               }
+               v.reset(OpARMANDconst)
+               v.AuxInt = c & 0x7fff
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Leq8U x y)
+       // match: (MOVHreg x:(MOVBreg _))
        // cond:
-       // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (MOVWreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessEqualU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x.Op != OpARMMOVBreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less16 x y)
+       // match: (MOVHreg x:(MOVBUreg _))
        // cond:
-       // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (MOVWreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessThan)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x.Op != OpARMMOVBUreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less16U x y)
+       // match: (MOVHreg x:(MOVHreg _))
        // cond:
-       // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (MOVWreg x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               if x.Op != OpARMMOVHreg {
+                       break
+               }
+               v.reset(OpARMMOVWreg)
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less32 x y)
+       // match: (MOVHreg (MOVWconst [c]))
        // cond:
-       // result: (LessThan (CMP x y))
+       // result: (MOVWconst [int64(int16(c))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessThan)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int16(c))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less32F x y)
+       // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (GreaterThan (CMPF y x))
+       // result: (MOVHstore [off1+off2] {sym} ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVHstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less32U x y)
-       // cond:
-       // result: (LessThanU (CMP x y))
+       // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVHstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less64F x y)
+       // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
        // cond:
-       // result: (GreaterThan (CMPD y x))
+       // result: (MOVHstore [off] {sym} ptr x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMGreaterThan)
-               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVHreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVHstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Less8 x y)
+       // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
        // cond:
-       // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+       // result: (MOVHstore [off] {sym} ptr x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessThan)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVHUreg {
+                       break
+               }
+               x := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVHstore)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less8U x y)
+       // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (MOVWload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMLessThanU)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMMOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpARMLessEqual(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (LessEqual (FlagEQ))
-       // cond:
-       // result: (MOVWconst [1])
+       // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               if v_0.Op != OpARMMOVWaddr {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARMMOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LessEqual (FlagLT_ULT))
-       // cond:
-       // result: (MOVWconst [1])
+       // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWstore {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               off2 := v_1.AuxInt
+               sym2 := v_1.Aux
+               ptr2 := v_1.Args[0]
+               x := v_1.Args[1]
+               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (LessEqual (FlagLT_UGT))
-       // cond:
-       // result: (MOVWconst [1])
+       // match: (MOVWload [0] {sym} (ADD ptr idx) mem)
+       // cond: sym == nil && !config.nacl
+       // result: (MOVWloadidx ptr idx mem)
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               if v_0.Op != OpARMADD {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(sym == nil && !config.nacl) {
+                       break
+               }
+               v.reset(OpARMMOVWloadidx)
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LessEqual (FlagGT_ULT))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
+       // cond: sym == nil && !config.nacl
+       // result: (MOVWloadshiftLL ptr idx [c] mem)
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               if v_0.Op != OpARMADDshiftLL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(sym == nil && !config.nacl) {
+                       break
+               }
+               v.reset(OpARMMOVWloadshiftLL)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LessEqual (FlagGT_UGT))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
+       // cond: sym == nil && !config.nacl
+       // result: (MOVWloadshiftRL ptr idx [c] mem)
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               if v_0.Op != OpARMADDshiftRL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(sym == nil && !config.nacl) {
+                       break
+               }
+               v.reset(OpARMMOVWloadshiftRL)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LessEqual (InvertFlags x))
-       // cond:
-       // result: (GreaterEqual x)
+       // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
+       // cond: sym == nil && !config.nacl
+       // result: (MOVWloadshiftRA ptr idx [c] mem)
        for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               if v_0.Op != OpARMADDshiftRA {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMGreaterEqual)
-               v.AddArg(x)
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               mem := v.Args[1]
+               if !(sym == nil && !config.nacl) {
+                       break
+               }
+               v.reset(OpARMMOVWloadshiftRA)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMLessEqualU(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (LessEqualU (FlagEQ))
+       // match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _))
+       // cond: isSamePtr(ptr, ptr2)
+       // result: x
+       for {
+               ptr := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWstoreidx {
+                       break
+               }
+               ptr2 := v_2.Args[0]
+               if idx != v_2.Args[1] {
+                       break
+               }
+               x := v_2.Args[2]
+               if !(isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (MOVWloadidx ptr (MOVWconst [c]) mem)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (MOVWload [c] ptr mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               v.reset(OpARMMOVWload)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWloadidx (MOVWconst [c]) ptr mem)
+       // cond:
+       // result: (MOVWload [c] ptr mem)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_0.AuxInt
+               ptr := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWload)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LessEqualU (FlagLT_ULT))
+       // match: (MOVWloadidx ptr (SLLconst idx [c]) mem)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (MOVWloadshiftLL ptr idx [c] mem)
+       for {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWloadshiftLL)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWloadidx (SLLconst idx [c]) ptr mem)
+       // cond:
+       // result: (MOVWloadshiftLL ptr idx [c] mem)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               if v_0.Op != OpARMSLLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_0.AuxInt
+               idx := v_0.Args[0]
+               ptr := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWloadshiftLL)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LessEqualU (FlagLT_UGT))
+       // match: (MOVWloadidx ptr (SRLconst idx [c]) mem)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (MOVWloadshiftRL ptr idx [c] mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWloadshiftRL)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LessEqualU (FlagGT_ULT))
+       // match: (MOVWloadidx (SRLconst idx [c]) ptr mem)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (MOVWloadshiftRL ptr idx [c] mem)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               if v_0.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_0.AuxInt
+               idx := v_0.Args[0]
+               ptr := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWloadshiftRL)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LessEqualU (FlagGT_UGT))
+       // match: (MOVWloadidx ptr (SRAconst idx [c]) mem)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (MOVWloadshiftRA ptr idx [c] mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWloadshiftRA)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
-       // match: (LessEqualU (InvertFlags x))
+       // match: (MOVWloadidx (SRAconst idx [c]) ptr mem)
        // cond:
-       // result: (GreaterEqualU x)
+       // result: (MOVWloadshiftRA ptr idx [c] mem)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               if v_0.Op != OpARMSRAconst {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMGreaterEqualU)
-               v.AddArg(x)
+               c := v_0.AuxInt
+               idx := v_0.Args[0]
+               ptr := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWloadshiftRA)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(mem)
                return true
        }
        return false
 }
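
The first MOVWloadidx rule above is store-to-load forwarding: a word loaded through (ptr, idx) right after a MOVWstoreidx through the same base and index is just the stored value, so the load becomes a copy of x. isSamePtr proves the bases equal, and the generated matcher requires the two idx arguments to be the very same SSA value. The remaining rules fold a constant index into a plain MOVWload displacement and a shifted index into the shift forms. A minimal sketch of the forwarding semantics, with illustrative names:

        package main

        import "fmt"

        // storeThenLoad shows the pattern the first rule removes: the
        // reload through the same (ptr, idx) is simply the stored value,
        // so the MOVWloadidx can be rewritten to x without touching memory.
        func storeThenLoad(mem []uint32, idx int, x uint32) uint32 {
                mem[idx] = x    // MOVWstoreidx ptr idx x mem
                return mem[idx] // MOVWloadidx ptr idx -> x
        }

        func main() {
                mem := make([]uint32, 4)
                fmt.Println(storeThenLoad(mem, 2, 42)) // 42
        }
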
-func rewriteValueARM_OpARMLessThan(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (LessThan (FlagEQ))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _))
+       // cond: c==d && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               c := v.AuxInt
+               ptr := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWstoreshiftLL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
-               return true
-       }
-       // match: (LessThan (FlagLT_ULT))
-       // cond:
-       // result: (MOVWconst [1])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               d := v_2.AuxInt
+               ptr2 := v_2.Args[0]
+               if idx != v_2.Args[1] {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
-               return true
-       }
-       // match: (LessThan (FlagLT_UGT))
-       // cond:
-       // result: (MOVWconst [1])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               x := v_2.Args[2]
+               if !(c == d && isSamePtr(ptr, ptr2)) {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (LessThan (FlagGT_ULT))
+       // match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               d := v.AuxInt
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               v.reset(OpARMMOVWload)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LessThan (FlagGT_UGT))
-       // cond:
-       // result: (MOVWconst [0])
+       return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _))
+       // cond: c==d && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               c := v.AuxInt
+               ptr := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWstoreshiftRA {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               d := v_2.AuxInt
+               ptr2 := v_2.Args[0]
+               if idx != v_2.Args[1] {
+                       break
+               }
+               x := v_2.Args[2]
+               if !(c == d && isSamePtr(ptr, ptr2)) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (LessThan (InvertFlags x))
+       // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem)
        // cond:
-       // result: (GreaterThan x)
+       // result: (MOVWload [int64(int32(c)>>uint64(d))] ptr mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               d := v.AuxInt
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMGreaterThan)
-               v.AddArg(x)
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               v.reset(OpARMMOVWload)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
        return false
 }
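
When the index of a shift-form load is itself a MOVWconst, the whole address collapses to a fixed displacement, and the AuxInt arithmetic in these rules matches the instruction's semantics: MOVWloadshiftRA folds with an arithmetic shift on the signed 32-bit value (int32, so the sign bit propagates), while the LL and RL forms use uint32 so the shift is logical and the result wraps at 32 bits before widening back to int64. A quick check of the three conversions:

        package main

        import "fmt"

        func main() {
                c, d := int64(-8), uint64(1)
                fmt.Println(int64(int32(c) >> d))   // -4: shiftRA folds arithmetically
                fmt.Println(int64(uint32(c) >> d))  // 2147483644: shiftRL is logical
                fmt.Println(int64(uint32(3) << 31)) // 2147483648: shiftLL wraps at 32 bits
        }
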
-func rewriteValueARM_OpARMLessThanU(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (LessThanU (FlagEQ))
-       // cond:
-       // result: (MOVWconst [0])
+       // match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _))
+       // cond: c==d && isSamePtr(ptr, ptr2)
+       // result: x
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               c := v.AuxInt
+               ptr := v.Args[0]
+               idx := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWstoreshiftRL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
-               return true
-       }
-       // match: (LessThanU (FlagLT_ULT))
-       // cond:
-       // result: (MOVWconst [1])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               d := v_2.AuxInt
+               ptr2 := v_2.Args[0]
+               if idx != v_2.Args[1] {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
-               return true
-       }
-       // match: (LessThanU (FlagLT_UGT))
-       // cond:
-       // result: (MOVWconst [0])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               x := v_2.Args[2]
+               if !(c == d && isSamePtr(ptr, ptr2)) {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (LessThanU (FlagGT_ULT))
+       // match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               d := v.AuxInt
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               v.reset(OpARMMOVWload)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (LessThanU (FlagGT_UGT))
-       // cond:
-       // result: (MOVWconst [0])
+       return false
+}
+func rewriteValueARM_OpARMMOVWreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWreg x)
+       // cond: x.Uses == 1
+       // result: (MOVWnop x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               x := v.Args[0]
+               if !(x.Uses == 1) {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               v.reset(OpARMMOVWnop)
+               v.AddArg(x)
                return true
        }
-       // match: (LessThanU (InvertFlags x))
+       // match: (MOVWreg (MOVWconst [c]))
        // cond:
-       // result: (GreaterThanU x)
+       // result: (MOVWconst [c])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMGreaterThanU)
-               v.AddArg(x)
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c
                return true
        }
        return false
 }
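
MOVWreg is a plain register-to-register move. The first rule turns it into MOVWnop when its operand has exactly one use, namely this move: nothing else consumes the source, so the move carries no information and the register allocator can give both values the same register. The second rule folds a move of a constant into the constant itself. A toy sketch of the use-count test; val and rewriteMOVWreg are illustrative, not the compiler's ssa types:

        package main

        import "fmt"

        // val stands in for the one field the rule consults: a value's
        // use count.
        type val struct{ uses int }

        // rewriteMOVWreg mirrors the first rule: with a single consumer,
        // the move degrades to a nop and regalloc can coalesce.
        func rewriteMOVWreg(x val) string {
                if x.uses == 1 {
                        return "MOVWnop"
                }
                return "MOVWreg" // other consumers still need the source
        }

        func main() {
                fmt.Println(rewriteMOVWreg(val{uses: 1})) // MOVWnop
                fmt.Println(rewriteMOVWreg(val{uses: 3})) // MOVWreg
        }
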
-func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Load <t> ptr mem)
-       // cond: t.IsBoolean()
-       // result: (MOVBUload ptr mem)
+       // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // cond:
+       // result: (MOVWstore [off1+off2] {sym} ptr val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(t.IsBoolean()) {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
                        break
                }
-               v.reset(OpARMMOVBUload)
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is8BitInt(t) && isSigned(t))
-       // result: (MOVBload ptr mem)
+       // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is8BitInt(t) && isSigned(t)) {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWaddr {
                        break
                }
-               v.reset(OpARMMOVBload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Load <t> ptr mem)
-       // cond: (is8BitInt(t) && !isSigned(t))
-       // result: (MOVBUload ptr mem)
-       for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is8BitInt(t) && !isSigned(t)) {
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
                        break
                }
-               v.reset(OpARMMOVBUload)
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
                v.AddArg(ptr)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is16BitInt(t) && isSigned(t))
-       // result: (MOVHload ptr mem)
+       // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
+       // cond: sym == nil && !config.nacl
+       // result: (MOVWstoreidx ptr idx val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is16BitInt(t) && isSigned(t)) {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpARMMOVHload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Load <t> ptr mem)
-       // cond: (is16BitInt(t) && !isSigned(t))
-       // result: (MOVHUload ptr mem)
-       for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is16BitInt(t) && !isSigned(t)) {
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADD {
                        break
                }
-               v.reset(OpARMMOVHUload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Load <t> ptr mem)
-       // cond: (is32BitInt(t) || isPtr(t))
-       // result: (MOVWload ptr mem)
-       for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitInt(t) || isPtr(t)) {
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(sym == nil && !config.nacl) {
                        break
                }
-               v.reset(OpARMMOVWload)
+               v.reset(OpARMMOVWstoreidx)
                v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is32BitFloat(t)
-       // result: (MOVFload ptr mem)
+       // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
+       // cond: sym == nil && !config.nacl
+       // result: (MOVWstoreshiftLL ptr idx [c] val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitFloat(t)) {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpARMMOVFload)
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDshiftLL {
+                       break
+               }
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(sym == nil && !config.nacl) {
+                       break
+               }
+               v.reset(OpARMMOVWstoreshiftLL)
+               v.AuxInt = c
                v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is64BitFloat(t)
-       // result: (MOVDload ptr mem)
+       // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
+       // cond: sym == nil && !config.nacl
+       // result: (MOVWstoreshiftRL ptr idx [c] val mem)
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is64BitFloat(t)) {
+               if v.AuxInt != 0 {
                        break
                }
-               v.reset(OpARMMOVDload)
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDshiftRL {
+                       break
+               }
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(sym == nil && !config.nacl) {
+                       break
+               }
+               v.reset(OpARMMOVWstoreshiftRL)
+               v.AuxInt = c
                v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lrot16 <t> x [c])
-       // cond:
-       // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
-       for {
-               t := v.Type
-               x := v.Args[0]
-               c := v.AuxInt
-               v.reset(OpARMOR)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
-               v0.AddArg(x)
-               v0.AuxInt = c & 15
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
-               v1.AddArg(x)
-               v1.AuxInt = 16 - c&15
-               v.AddArg(v1)
-               return true
-       }
-}
-func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lrot32 x [c])
-       // cond:
-       // result: (SRRconst x [32-c&31])
-       for {
-               x := v.Args[0]
-               c := v.AuxInt
-               v.reset(OpARMSRRconst)
-               v.AddArg(x)
-               v.AuxInt = 32 - c&31
-               return true
-       }
-}
-func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lrot8 <t> x [c])
-       // cond:
-       // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
+       // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
+       // cond: sym == nil && !config.nacl
+       // result: (MOVWstoreshiftRA ptr idx [c] val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               c := v.AuxInt
-               v.reset(OpARMOR)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
-               v0.AddArg(x)
-               v0.AuxInt = c & 7
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
-               v1.AddArg(x)
-               v1.AuxInt = 8 - c&7
-               v.AddArg(v1)
+               if v.AuxInt != 0 {
+                       break
+               }
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDshiftRA {
+                       break
+               }
+               c := v_0.AuxInt
+               ptr := v_0.Args[0]
+               idx := v_0.Args[1]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(sym == nil && !config.nacl) {
+                       break
+               }
+               v.reset(OpARMMOVWstoreshiftRA)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
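
The MOVWstore rules are the store-side mirror of the load cleanups. The first folds an ADDconst base into the displacement: storing at ptr+off2 with displacement off1 is storing at ptr with displacement off1+off2. The second merges a MOVWaddr's symbol when canMergeSym allows. The [0]-offset rules then convert to indexed and shifted-index stores under the same sym == nil && !config.nacl guard as the loads; the AuxInt must be 0 because the indexed forms have no displacement field. The offset identity, checked directly:

        package main

        import "fmt"

        func main() {
                // (MOVWstore [off1] (ADDconst [off2] ptr) val mem) writes to
                // (ptr+off2)+off1 == ptr+(off1+off2), the folded form.
                const off1, off2 = 8, -4
                ptr := int64(0x1000)
                fmt.Println((ptr+off2)+off1 == ptr+(off1+off2)) // true
        }
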
-func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x16 x y)
+       // match: (MOVWstoreidx ptr (MOVWconst [c]) val mem)
        // cond:
-       // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+       // result: (MOVWstore [c] ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMCMOVWHSconst)
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v2.AuxInt = 256
-               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v3.AddArg(y)
-               v2.AddArg(v3)
-               v.AddArg(v2)
-               v.AuxInt = 0
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh16x32 x y)
+       // match: (MOVWstoreidx (MOVWconst [c]) ptr val mem)
        // cond:
-       // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+       // result: (MOVWstore [c] ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMCMOVWHSconst)
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v1.AuxInt = 256
-               v1.AddArg(y)
-               v.AddArg(v1)
-               v.AuxInt = 0
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               ptr := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh16x64 x (Const64 [c]))
-       // cond: uint64(c) < 16
-       // result: (SLLconst x [c])
+       // match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem)
+       // cond:
+       // result: (MOVWstoreshiftLL ptr idx [c] val mem)
        for {
-               x := v.Args[0]
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               if v_1.Op != OpARMSLLconst {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) < 16) {
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstoreshiftLL)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem)
+       // cond:
+       // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
                        break
                }
-               v.reset(OpARMSLLconst)
-               v.AddArg(x)
+               c := v_0.AuxInt
+               idx := v_0.Args[0]
+               ptr := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstoreshiftLL)
                v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (Lsh16x64 _ (Const64 [c]))
-       // cond: uint64(c) >= 16
-       // result: (Const16 [0])
+       // match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem)
+       // cond:
+       // result: (MOVWstoreshiftRL ptr idx [c] val mem)
        for {
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               if v_1.Op != OpARMSRLconst {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
-                       break
-               }
-               v.reset(OpConst16)
-               v.AuxInt = 0
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstoreshiftRL)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh16x8  x y)
+       // match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem)
        // cond:
-       // result: (SLL x (ZeroExt8to32 y))
+       // result: (MOVWstoreshiftRL ptr idx [c] val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSLL)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               idx := v_0.Args[0]
+               ptr := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstoreshiftRL)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x16 x y)
+       // match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem)
        // cond:
-       // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+       // result: (MOVWstoreshiftRA ptr idx [c] val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMCMOVWHSconst)
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v2.AuxInt = 256
-               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v3.AddArg(y)
-               v2.AddArg(v3)
-               v.AddArg(v2)
-               v.AuxInt = 0
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_1.AuxInt
+               idx := v_1.Args[0]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstoreshiftRA)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x32 x y)
+       // match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem)
        // cond:
-       // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+       // result: (MOVWstoreshiftRA ptr idx [c] val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMCMOVWHSconst)
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v1.AuxInt = 256
-               v1.AddArg(y)
-               v.AddArg(v1)
-               v.AuxInt = 0
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_0.AuxInt
+               idx := v_0.Args[0]
+               ptr := v.Args[1]
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstoreshiftRA)
+               v.AuxInt = c
+               v.AddArg(ptr)
+               v.AddArg(idx)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
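
Each MOVWstoreidx fold above appears twice, once with the constant or shift in the index slot and once in the pointer slot: the two address components of the indexed form are interchangeable, so both operand orders must be matched. The shift forms correspond roughly to ARM's scaled-register addressing (e.g. str rV, [rP, rI, lsl #c]). Tying this back to rulegen, each generated matcher comes from one rule in ARM.rules of the shape below; the text is reconstructed from the match/result comments and may differ from the source file in minor details:

        package main

        import "fmt"

        // movwStoreIdxFolds is the rule pair behind the SLLconst fold,
        // reconstructed from the match/result comments (illustrative; the
        // exact ARM.rules text may differ).
        const movwStoreIdxFolds = `
        (MOVWstoreidx ptr (SLLconst idx [c]) val mem) -> (MOVWstoreshiftLL ptr idx [c] val mem)
        (MOVWstoreidx (SLLconst idx [c]) ptr val mem) -> (MOVWstoreshiftLL ptr idx [c] val mem)
        `

        func main() { fmt.Print(movwStoreIdxFolds) }
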
-func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh32x64 x (Const64 [c]))
-       // cond: uint64(c) < 32
-       // result: (SLLconst x [c])
+       // match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem)
+       // cond:
+       // result: (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
        for {
-               x := v.Args[0]
+               d := v.AuxInt
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) < 32) {
-                       break
-               }
-               v.reset(OpARMSLLconst)
-               v.AddArg(x)
-               v.AuxInt = c
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (Lsh32x64 _ (Const64 [c]))
-       // cond: uint64(c) >= 32
-       // result: (Const32 [0])
+       return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem)
+       // cond:
+       // result: (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
        for {
+               d := v.AuxInt
+               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
-                       break
-               }
-               v.reset(OpConst32)
-               v.AuxInt = 0
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
        return false
 }
-func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh32x8  x y)
+       // match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem)
        // cond:
-       // result: (SLL x (ZeroExt8to32 y))
+       // result: (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSLL)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(y)
-               v.AddArg(v0)
+               d := v.AuxInt
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               val := v.Args[2]
+               mem := v.Args[3]
+               v.reset(OpARMMOVWstore)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
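
The MUL function that begins below is classic strength reduction, replacing multiplication by a constant with shifts and shifted adds: int32(c) == -1 becomes a reverse subtract from zero, a power of two becomes one SLLconst, 2^n+1 and 2^n-1 become a single ADDshiftLL or RSBshiftLL, and constants divisible by 3, 5, 7, or 9 with a power-of-two quotient use one shifted add or reverse-subtract followed by a shift. Each rule appears for both operand orders of MUL. The identities, checked in Go with 32-bit arithmetic:

        package main

        import "fmt"

        func main() {
                x := int32(11)
                fmt.Println(-1*x == 0-x)           // RSBconst [0] x
                fmt.Println(8*x == x<<3)           // SLLconst [3] x
                fmt.Println(9*x == (x<<3)+x)       // ADDshiftLL x x [3]
                fmt.Println(7*x == (x<<3)-x)       // RSBshiftLL x x [3]
                fmt.Println(12*x == ((x<<1)+x)<<2) // SLLconst [2] (ADDshiftLL x x [1]): 12 = 3*4
        }
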
-func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x16 x y)
-       // cond:
-       // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+       // match: (MUL x (MOVWconst [c]))
+       // cond: int32(c) == -1
+       // result: (RSBconst [0] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMCMOVWHSconst)
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v2.AuxInt = 256
-               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v3.AddArg(y)
-               v2.AddArg(v3)
-               v.AddArg(v2)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(int32(c) == -1) {
+                       break
+               }
+               v.reset(OpARMRSBconst)
                v.AuxInt = 0
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh8x32 x y)
+       // match: (MUL _ (MOVWconst [0]))
        // cond:
-       // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+       // result: (MOVWconst [0])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMCMOVWHSconst)
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v1.AuxInt = 256
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               if v_1.AuxInt != 0 {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
                v.AuxInt = 0
                return true
        }
-}
-func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh8x64 x (Const64 [c]))
-       // cond: uint64(c) < 8
-       // result: (SLLconst x [c])
+       // match: (MUL x (MOVWconst [1]))
+       // cond:
+       // result: x
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               if v_1.AuxInt != 1 {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (MUL x (MOVWconst [c]))
+       // cond: isPowerOfTwo(c)
+       // result: (SLLconst [log2(c)] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) < 8) {
+               if !(isPowerOfTwo(c)) {
                        break
                }
                v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c)
                v.AddArg(x)
-               v.AuxInt = c
                return true
        }
-       // match: (Lsh8x64 _ (Const64 [c]))
-       // cond: uint64(c) >= 8
-       // result: (Const8 [0])
+       // match: (MUL x (MOVWconst [c]))
+       // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+       // result: (ADDshiftLL x x [log2(c-1)])
        for {
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
+               if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
                        break
                }
-               v.reset(OpConst8)
-               v.AuxInt = 0
+               v.reset(OpARMADDshiftLL)
+               v.AuxInt = log2(c - 1)
+               v.AddArg(x)
+               v.AddArg(x)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh8x8  x y)
-       // cond:
-       // result: (SLL x (ZeroExt8to32 y))
+       // match: (MUL x (MOVWconst [c]))
+       // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+       // result: (RSBshiftLL x x [log2(c+1)])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSLL)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+                       break
+               }
+               v.reset(OpARMRSBshiftLL)
+               v.AuxInt = log2(c + 1)
+               v.AddArg(x)
                v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(y)
-               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
-       // cond:
-       // result: (MOVBUload [off1+off2] {sym} ptr mem)
+       // match: (MUL x (MOVWconst [c]))
+       // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+       // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVBUload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 3)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AuxInt = 1
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (MUL x (MOVWconst [c]))
+       // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+       // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
                        break
                }
-               v.reset(OpARMMOVBUload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 5)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AuxInt = 2
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
-       // result: x
+       // match: (MUL x (MOVWconst [c]))
+       // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+       // result: (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVBstore {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
+               c := v_1.AuxInt
+               if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 7)
+               v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+               v0.AuxInt = 3
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVBUreg(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBUreg x:(MOVBUload _ _))
-       // cond:
-       // result: (MOVWreg x)
+       // match: (MUL x (MOVWconst [c]))
+       // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+       // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
        for {
                x := v.Args[0]
-               if x.Op != OpARMMOVBUload {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWreg)
-               v.AddArg(x)
+               c := v_1.AuxInt
+               if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 9)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AuxInt = 3
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVBUreg (ANDconst [c] x))
-       // cond:
-       // result: (ANDconst [c&0xff] x)
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: int32(c) == -1
+       // result: (RSBconst [0] x)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMANDconst {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMANDconst)
-               v.AuxInt = c & 0xff
-               v.AddArg(x)
-               return true
-       }
-       // match: (MOVBUreg x:(MOVBUreg _))
-       // cond:
-       // result: (MOVWreg x)
-       for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVBUreg {
+               x := v.Args[1]
+               if !(int32(c) == -1) {
                        break
                }
-               v.reset(OpARMMOVWreg)
+               v.reset(OpARMRSBconst)
+               v.AuxInt = 0
                v.AddArg(x)
                return true
        }
-       // match: (MOVBUreg (MOVWconst [c]))
+       // match: (MUL (MOVWconst [0]) _)
        // cond:
-       // result: (MOVWconst [int64(uint8(c))])
+       // result: (MOVWconst [0])
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
-               c := v_0.AuxInt
+               if v_0.AuxInt != 0 {
+                       break
+               }
                v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(uint8(c))
+               v.AuxInt = 0
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (MUL (MOVWconst [1]) x)
        // cond:
-       // result: (MOVBload [off1+off2] {sym} ptr mem)
+       // result: x
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVBload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               if v_0.AuxInt != 1 {
+                       break
+               }
+               x := v.Args[1]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: isPowerOfTwo(c)
+       // result: (SLLconst [log2(c)] x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(isPowerOfTwo(c)) {
                        break
                }
-               v.reset(OpARMMOVBload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
-       // result: x
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+       // result: (ADDshiftLL x x [log2(c-1)])
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVBstore {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               v.reset(OpARMADDshiftLL)
+               v.AuxInt = log2(c - 1)
                v.AddArg(x)
-               return true
-       }
-       return false
-}
-func rewriteValueARM_OpARMMOVBreg(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBreg x:(MOVBload _ _))
-       // cond:
-       // result: (MOVWreg x)
-       for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVBload {
-                       break
-               }
-               v.reset(OpARMMOVWreg)
                v.AddArg(x)
                return true
        }
-       // match: (MOVBreg (ANDconst [c] x))
-       // cond: c & 0x80 == 0
-       // result: (ANDconst [c&0x7f] x)
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+       // result: (RSBshiftLL x x [log2(c+1)])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMANDconst {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               x := v_0.Args[0]
-               if !(c&0x80 == 0) {
+               x := v.Args[1]
+               if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
                        break
                }
-               v.reset(OpARMANDconst)
-               v.AuxInt = c & 0x7f
+               v.reset(OpARMRSBshiftLL)
+               v.AuxInt = log2(c + 1)
+               v.AddArg(x)
                v.AddArg(x)
                return true
        }
-       // match: (MOVBreg x:(MOVBreg _))
-       // cond:
-       // result: (MOVWreg x)
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+       // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
        for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVBreg {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWreg)
-               v.AddArg(x)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 3)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AuxInt = 1
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVBreg (MOVWconst [c]))
-       // cond:
-       // result: (MOVWconst [int64(int8(c))])
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+       // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int8(c))
-               return true
-       }
-       return false
-}
-func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-       // cond:
-       // result: (MOVBstore [off1+off2] {sym} ptr val mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               x := v.Args[1]
+               if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 5)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AuxInt = 2
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+       // result: (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
                        break
                }
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 7)
+               v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+               v0.AuxInt = 3
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
-       // cond:
-       // result: (MOVBstore [off] {sym} ptr x mem)
+       // match: (MUL (MOVWconst [c]) x)
+       // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+       // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVBreg {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(x)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
-       // cond:
-       // result: (MOVBstore [off] {sym} ptr x mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVBUreg {
+               c := v_0.AuxInt
+               x := v.Args[1]
+               if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(x)
-               v.AddArg(mem)
+               v.reset(OpARMSLLconst)
+               v.AuxInt = log2(c / 9)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AuxInt = 3
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+       // match: (MUL (MOVWconst [c]) (MOVWconst [d]))
        // cond:
-       // result: (MOVBstore [off] {sym} ptr x mem)
+       // result: (MOVWconst [int64(int32(c*d))])
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVHreg {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(x)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
-       // cond:
-       // result: (MOVBstore [off] {sym} ptr x mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               c := v_0.AuxInt
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVHUreg {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(x)
-               v.AddArg(mem)
+               d := v_1.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int32(c * d))
                return true
        }
        return false
 }
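
The MUL rules above strength-reduce multiplication by a few constant shapes into ARM shift and shifted-add/subtract ops: ADDshiftLL x x [n] computes x + (x<<n), RSBshiftLL x x [n] computes (x<<n) - x, and the c%{3,5,7,9} cases compose one of those with a final SLLconst. A minimal standalone sketch of the underlying identities (plain Go with illustrative constants, not compiler code):

package main

import "fmt"

func main() {
	x := int32(123456)
	// c == 2^n+1 (isPowerOfTwo(c-1)): x*c == x + x<<n, the ADDshiftLL form.
	fmt.Println(x*9 == x+x<<3)
	// c == 2^n-1 (isPowerOfTwo(c+1)): x*c == x<<n - x, the RSBshiftLL form.
	fmt.Println(x*7 == x<<3-x)
	// c == 3<<k: x*c == (x + x<<1) << k, an ADDshiftLL under an SLLconst.
	fmt.Println(x*24 == (x+x<<1)<<3)
	// c == 5<<k: x*c == (x + x<<2) << k.
	fmt.Println(x*40 == (x+x<<2)<<3)
	// c == 7<<k: x*c == (x<<3 - x) << k, an RSBshiftLL under an SLLconst.
	fmt.Println(x*56 == (x<<3-x)<<3)
}

Every comparison prints true; int32 wraparound matches the int64(int32(c*d)) folding used when both operands are constant.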
-func rewriteValueARM_OpARMMOVDload(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-       // cond:
-       // result: (MOVDload [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVDload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: int32(c) == -1
+       // result: (SUB a x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(int32(c) == -1) {
                        break
                }
-               v.reset(OpARMMOVDload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARMSUB)
+               v.AddArg(a)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-       // result: x
+       // match: (MULA _ (MOVWconst [0]) a)
+       // cond:
+       // result: a
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVDstore {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+               if v_1.AuxInt != 0 {
                        break
                }
+               a := v.Args[2]
                v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               v.Type = a.Type
+               v.AddArg(a)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVDstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (MULA x (MOVWconst [1]) a)
        // cond:
-       // result: (MOVDstore [off1+off2] {sym} ptr val mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+       // result: (ADD x a)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               if v_1.AuxInt != 1 {
+                       break
+               }
+               a := v.Args[2]
+               v.reset(OpARMADD)
+               v.AddArg(x)
+               v.AddArg(a)
                return true
        }
-       // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: isPowerOfTwo(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(isPowerOfTwo(c)) {
                        break
                }
-               v.reset(OpARMMOVDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVFload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem)
-       // cond:
-       // result: (MOVFload [off1+off2] {sym} ptr mem)
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+       // result: (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVFload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AuxInt = log2(c - 1)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+       // result: (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
                        break
                }
-               v.reset(OpARMMOVFload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+               v0.AuxInt = log2(c + 1)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-       // result: x
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVFstore {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 3)
+               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v1.AuxInt = 1
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVFstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-       // cond:
-       // result: (MOVFstore [off1+off2] {sym} ptr val mem)
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVFstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 5)
+               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v1.AuxInt = 2
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
                        break
                }
-               v.reset(OpARMMOVFstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 7)
+               v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+               v1.AuxInt = 3
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
-       // cond:
-       // result: (MOVHUload [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+       // match: (MULA x (MOVWconst [c]) a)
+       // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVHUload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               a := v.Args[2]
+               if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 9)
+               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v1.AuxInt = 3
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: int32(c) == -1
+       // result: (SUB a x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(int32(c) == -1) {
                        break
                }
-               v.reset(OpARMMOVHUload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARMSUB)
+               v.AddArg(a)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
-       // result: x
+       // match: (MULA (MOVWconst [0]) _ a)
+       // cond:
+       // result: a
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVHstore {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
+               if v_0.AuxInt != 0 {
                        break
                }
+               a := v.Args[2]
                v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               v.Type = a.Type
+               v.AddArg(a)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVHUreg(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVHUreg x:(MOVBUload _ _))
+       // match: (MULA (MOVWconst [1]) x a)
        // cond:
-       // result: (MOVWreg x)
+       // result: (ADD x a)
        for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVBUload {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWreg)
+               if v_0.AuxInt != 1 {
+                       break
+               }
+               x := v.Args[1]
+               a := v.Args[2]
+               v.reset(OpARMADD)
                v.AddArg(x)
+               v.AddArg(a)
                return true
        }
-       // match: (MOVHUreg x:(MOVHUload _ _))
-       // cond:
-       // result: (MOVWreg x)
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: isPowerOfTwo(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
        for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVHUload {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWreg)
-               v.AddArg(x)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(isPowerOfTwo(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       // match: (MOVHUreg (ANDconst [c] x))
-       // cond:
-       // result: (ANDconst [c&0xffff] x)
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: isPowerOfTwo(c-1) && int32(c) >= 3
+       // result: (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMANDconst {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMANDconst)
-               v.AuxInt = c & 0xffff
-               v.AddArg(x)
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v0.AuxInt = log2(c - 1)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       // match: (MOVHUreg x:(MOVBUreg _))
-       // cond:
-       // result: (MOVWreg x)
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: isPowerOfTwo(c+1) && int32(c) >= 7
+       // result: (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
        for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVBUreg {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWreg)
-               v.AddArg(x)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+               v0.AuxInt = log2(c + 1)
+               v0.AddArg(x)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       // match: (MOVHUreg x:(MOVHUreg _))
-       // cond:
-       // result: (MOVWreg x)
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
        for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVHUreg {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWreg)
-               v.AddArg(x)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 3)
+               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v1.AuxInt = 1
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       // match: (MOVHUreg (MOVWconst [c]))
-       // cond:
-       // result: (MOVWconst [int64(uint16(c))])
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(uint16(c))
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 5)
+               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v1.AuxInt = 2
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
-       // cond:
-       // result: (MOVHload [off1+off2] {sym} ptr mem)
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVHload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+                       break
+               }
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 7)
+               v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
+               v1.AuxInt = 3
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (MULA (MOVWconst [c]) x a)
+       // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+       // result: (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_0.AuxInt
+               x := v.Args[1]
+               a := v.Args[2]
+               if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
                        break
                }
-               v.reset(OpARMMOVHload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARMADD)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = log2(c / 9)
+               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
+               v1.AuxInt = 3
+               v1.AddArg(x)
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v.AddArg(a)
                return true
        }
-       // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
-       // result: x
+       // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a)
+       // cond:
+       // result: (ADDconst [int64(int32(c*d))] a)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVHstore {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
+               c := v_0.AuxInt
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               d := v_1.AuxInt
+               a := v.Args[2]
+               v.reset(OpARMADDconst)
+               v.AuxInt = int64(int32(c * d))
+               v.AddArg(a)
                return true
        }
        return false
 }
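
MULA x y a computes x*y + a, so its constant cases mirror the MUL rules with the addend carried through an outer ADD (or SUB for c == -1, and ADDconst when both multiplicands are constant). A small illustrative check of the arithmetic, again in plain Go:

package main

import "fmt"

func main() {
	x, a := int32(1234), int32(99)
	fmt.Println(x*-1+a == a-x)           // int32(c) == -1 -> (SUB a x)
	fmt.Println(x*8+a == x<<3+a)         // c == 2^n       -> (ADD (SLLconst [n] x) a)
	fmt.Println(x*9+a == x+x<<3+a)       // c == 2^n+1     -> (ADD (ADDshiftLL x x [n]) a)
	fmt.Println(x*7+a == x<<3-x+a)       // c == 2^n-1     -> (ADD (RSBshiftLL x x [n]) a)
	fmt.Println(x*40+a == (x+x<<2)<<3+a) // c == 5<<k      -> shifted ADDshiftLL, then ADD a
}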
-func rewriteValueARM_OpARMMOVHreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVHreg x:(MOVBload _ _))
+       // match: (MVN (MOVWconst [c]))
        // cond:
-       // result: (MOVWreg x)
+       // result: (MOVWconst [^c])
        for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVBload {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWreg)
-               v.AddArg(x)
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = ^c
                return true
        }
-       // match: (MOVHreg x:(MOVBUload _ _))
+       // match: (MVN (SLLconst [c] x))
        // cond:
-       // result: (MOVWreg x)
+       // result: (MVNshiftLL x [c])
        for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVBUload {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
                        break
                }
-               v.reset(OpARMMOVWreg)
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMMVNshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (MOVHreg x:(MOVHload _ _))
+       // match: (MVN (SRLconst [c] x))
        // cond:
-       // result: (MOVWreg x)
+       // result: (MVNshiftRL x [c])
        for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVHload {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMMOVWreg)
+               c := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMMVNshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (MOVHreg (ANDconst [c] x))
-       // cond: c & 0x8000 == 0
-       // result: (ANDconst [c&0x7fff] x)
+       // match: (MVN (SRAconst [c] x))
+       // cond:
+       // result: (MVNshiftRA x [c])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMANDconst {
+               if v_0.Op != OpARMSRAconst {
                        break
                }
                c := v_0.AuxInt
                x := v_0.Args[0]
-               if !(c&0x8000 == 0) {
-                       break
-               }
-               v.reset(OpARMANDconst)
-               v.AuxInt = c & 0x7fff
+               v.reset(OpARMMVNshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (MOVHreg x:(MOVBreg _))
+       // match: (MVN (SLL x y))
        // cond:
-       // result: (MOVWreg x)
+       // result: (MVNshiftLLreg x y)
        for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVBreg {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLL {
                        break
                }
-               v.reset(OpARMMOVWreg)
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               v.reset(OpARMMVNshiftLLreg)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVHreg x:(MOVBUreg _))
+       // match: (MVN (SRL x y))
        // cond:
-       // result: (MOVWreg x)
+       // result: (MVNshiftRLreg x y)
        for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVBUreg {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRL {
                        break
                }
-               v.reset(OpARMMOVWreg)
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               v.reset(OpARMMVNshiftRLreg)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVHreg x:(MOVHreg _))
+       // match: (MVN (SRA x y))
        // cond:
-       // result: (MOVWreg x)
+       // result: (MVNshiftRAreg x y)
        for {
-               x := v.Args[0]
-               if x.Op != OpARMMOVHreg {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRA {
                        break
                }
-               v.reset(OpARMMOVWreg)
+               x := v_0.Args[0]
+               y := v_0.Args[1]
+               v.reset(OpARMMVNshiftRAreg)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVHreg (MOVWconst [c]))
+       return false
+}
+func rewriteValueARM_OpARMMVNshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVNshiftLL (MOVWconst [c]) [d])
        // cond:
-       // result: (MOVWconst [int64(int16(c))])
+       // result: (MOVWconst [^int64(uint32(c)<<uint64(d))])
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int16(c))
+               v.AuxInt = ^int64(uint32(c) << uint64(d))
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMMVNshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (MVNshiftLLreg x (MOVWconst [c]))
        // cond:
-       // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+       // result: (MVNshiftLL x [c])
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVHstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpARMMVNshiftLL)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       return false
+}
+func rewriteValueARM_OpARMMVNshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVNshiftRA (MOVWconst [c]) [d])
+       // cond:
+       // result: (MOVWconst [^int64(int32(c)>>uint64(d))])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
+               d := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = ^int64(int32(c) >> uint64(d))
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMMVNshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVNshiftRAreg x (MOVWconst [c]))
+       // cond:
+       // result: (MVNshiftRA x [c])
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVHstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpARMMVNshiftRA)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+       return false
+}
+func rewriteValueARM_OpARMMVNshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVNshiftRL (MOVWconst [c]) [d])
        // cond:
-       // result: (MOVHstore [off] {sym} ptr x mem)
+       // result: (MOVWconst [^int64(uint32(c)>>uint64(d))])
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVHreg {
+               d := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpARMMOVHstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(x)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = ^int64(uint32(c) >> uint64(d))
                return true
        }
-       // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+       return false
+}
+func rewriteValueARM_OpARMMVNshiftRLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (MVNshiftRLreg x (MOVWconst [c]))
        // cond:
-       // result: (MOVHstore [off] {sym} ptr x mem)
+       // result: (MVNshiftRL x [c])
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVHUreg {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               x := v_1.Args[0]
-               mem := v.Args[2]
-               v.reset(OpARMMOVHstore)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               c := v_1.AuxInt
+               v.reset(OpARMMVNshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AddArg(mem)
                return true
        }
        return false
 }
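
The MVN family folds ARM's bitwise-NOT through its operand: a constant complements directly, an immediate shift becomes a single MVNshift* op, a register shift becomes the corresponding *reg form, and MVNshift* of a constant folds away entirely. The ^int64(uint32(c)<<uint64(d)) folds above produce the sign-extended 32-bit complement, which is the form a MOVWconst is expected to carry; an illustrative check (plain Go, not compiler code):

package main

import "fmt"

func main() {
	// MVNshiftLL (MOVWconst [c]) [d] folds to ^int64(uint32(c)<<uint64(d)).
	c, d := int64(0x12345678), uint64(4)
	fold := ^int64(uint32(c) << d)
	manual := int64(int32(^(uint32(c) << d))) // 32-bit shift, complement, sign-extend
	fmt.Println(fold == manual)               // true
}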
-func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (NotEqual (FlagEQ))
        // cond:
-       // result: (MOVWload [off1+off2] {sym} ptr mem)
+       // result: (MOVWconst [0])
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               if v_0.Op != OpARMFlagEQ {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMMOVWload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (NotEqual (FlagLT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               if v_0.Op != OpARMFlagLT_ULT {
                        break
                }
-               v.reset(OpARMMOVWload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
-       // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-       // result: x
+       // match: (NotEqual (FlagLT_UGT))
+       // cond:
+       // result: (MOVWconst [1])
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWstore {
-                       break
-               }
-               off2 := v_1.AuxInt
-               sym2 := v_1.Aux
-               ptr2 := v_1.Args[0]
-               x := v_1.Args[1]
-               if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMFlagLT_UGT {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWload [0] {sym} (ADD ptr idx) mem)
-       // cond: sym == nil && !config.nacl
-       // result: (MOVWloadidx ptr idx mem)
+       // match: (NotEqual (FlagGT_ULT))
+       // cond:
+       // result: (MOVWconst [1])
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMADD {
-                       break
-               }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               mem := v.Args[1]
-               if !(sym == nil && !config.nacl) {
+               if v_0.Op != OpARMFlagGT_ULT {
                        break
                }
-               v.reset(OpARMMOVWloadidx)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(mem)
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
-       // cond: sym == nil && !config.nacl
-       // result: (MOVWloadshiftLL ptr idx [c] mem)
+       // match: (NotEqual (FlagGT_UGT))
+       // cond:
+       // result: (MOVWconst [1])
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMADDshiftLL {
-                       break
-               }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               c := v_0.AuxInt
-               mem := v.Args[1]
-               if !(sym == nil && !config.nacl) {
+               if v_0.Op != OpARMFlagGT_UGT {
                        break
                }
-               v.reset(OpARMMOVWloadshiftLL)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AuxInt = c
-               v.AddArg(mem)
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 1
                return true
        }
-       // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
-       // cond: sym == nil && !config.nacl
-       // result: (MOVWloadshiftRL ptr idx [c] mem)
+       // match: (NotEqual (InvertFlags x))
+       // cond:
+       // result: (NotEqual x)
        for {
-               if v.AuxInt != 0 {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMInvertFlags {
                        break
                }
-               sym := v.Aux
+               x := v_0.Args[0]
+               v.reset(OpARMNotEqual)
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (OR (MOVWconst [c]) x)
+       // cond:
+       // result: (ORconst [c] x)
+       for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMADDshiftRL {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
                c := v_0.AuxInt
-               mem := v.Args[1]
-               if !(sym == nil && !config.nacl) {
+               x := v.Args[1]
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (OR x (MOVWconst [c]))
+       // cond:
+       // result: (ORconst [c] x)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWloadshiftRL)
-               v.AddArg(ptr)
-               v.AddArg(idx)
+               c := v_1.AuxInt
+               v.reset(OpARMORconst)
                v.AuxInt = c
-               v.AddArg(mem)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
-       // cond: sym == nil && !config.nacl
-       // result: (MOVWloadshiftRA ptr idx [c] mem)
+       // match: (OR x (SLLconst [c] y))
+       // cond:
+       // result: (ORshiftLL x y [c])
        for {
-               if v.AuxInt != 0 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
                        break
                }
-               sym := v.Aux
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMORshiftLL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+       // match: (OR (SLLconst [c] y) x)
+       // cond:
+       // result: (ORshiftLL x y [c])
+       for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMADDshiftRA {
+               if v_0.Op != OpARMSLLconst {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
                c := v_0.AuxInt
-               mem := v.Args[1]
-               if !(sym == nil && !config.nacl) {
-                       break
-               }
-               v.reset(OpARMMOVWloadshiftRA)
-               v.AddArg(ptr)
-               v.AddArg(idx)
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMORshiftLL)
                v.AuxInt = c
-               v.AddArg(mem)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _))
-       // cond: isSamePtr(ptr, ptr2)
-       // result: x
+       // match: (OR x (SRLconst [c] y))
+       // cond:
+       // result: (ORshiftRL x y [c])
        for {
-               ptr := v.Args[0]
-               idx := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWstoreidx {
-                       break
-               }
-               ptr2 := v_2.Args[0]
-               if idx != v_2.Args[1] {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
                        break
                }
-               x := v_2.Args[2]
-               if !(isSamePtr(ptr, ptr2)) {
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMORshiftRL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+       // match: (OR (SRLconst [c] y) x)
+       // cond:
+       // result: (ORshiftRL x y [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMORshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWloadidx ptr (MOVWconst [c]) mem)
+       // match: (OR x (SRAconst [c] y))
        // cond:
-       // result: (MOVWload [c] ptr mem)
+       // result: (ORshiftRA x y [c])
        for {
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMSRAconst {
                        break
                }
                c := v_1.AuxInt
-               mem := v.Args[2]
-               v.reset(OpARMMOVWload)
+               y := v_1.Args[0]
+               v.reset(OpARMORshiftRA)
                v.AuxInt = c
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWloadidx (MOVWconst [c]) ptr mem)
+       // match: (OR (SRAconst [c] y) x)
        // cond:
-       // result: (MOVWload [c] ptr mem)
+       // result: (ORshiftRA x y [c])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSRAconst {
                        break
                }
                c := v_0.AuxInt
-               ptr := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVWload)
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMORshiftRA)
                v.AuxInt = c
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWloadidx ptr (SLLconst idx [c]) mem)
+       // match: (OR x (SLL y z))
        // cond:
-       // result: (MOVWloadshiftLL ptr idx [c] mem)
+       // result: (ORshiftLLreg x y z)
        for {
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSLLconst {
+               if v_1.Op != OpARMSLL {
                        break
                }
-               idx := v_1.Args[0]
-               c := v_1.AuxInt
-               mem := v.Args[2]
-               v.reset(OpARMMOVWloadshiftLL)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AuxInt = c
-               v.AddArg(mem)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMORshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (MOVWloadidx (SLLconst idx [c]) ptr mem)
+       // match: (OR (SLL y z) x)
        // cond:
-       // result: (MOVWloadshiftLL ptr idx [c] mem)
+       // result: (ORshiftLLreg x y z)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSLLconst {
+               if v_0.Op != OpARMSLL {
                        break
                }
-               idx := v_0.Args[0]
-               c := v_0.AuxInt
-               ptr := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVWloadshiftLL)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AuxInt = c
-               v.AddArg(mem)
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMORshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (MOVWloadidx ptr (SRLconst idx [c]) mem)
+       // match: (OR x (SRL y z))
        // cond:
-       // result: (MOVWloadshiftRL ptr idx [c] mem)
+       // result: (ORshiftRLreg x y z)
        for {
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSRLconst {
+               if v_1.Op != OpARMSRL {
                        break
                }
-               idx := v_1.Args[0]
-               c := v_1.AuxInt
-               mem := v.Args[2]
-               v.reset(OpARMMOVWloadshiftRL)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AuxInt = c
-               v.AddArg(mem)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMORshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (MOVWloadidx (SRLconst idx [c]) ptr mem)
+       // match: (OR (SRL y z) x)
        // cond:
-       // result: (MOVWloadshiftRL ptr idx [c] mem)
+       // result: (ORshiftRLreg x y z)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSRLconst {
+               if v_0.Op != OpARMSRL {
                        break
                }
-               idx := v_0.Args[0]
-               c := v_0.AuxInt
-               ptr := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVWloadshiftRL)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AuxInt = c
-               v.AddArg(mem)
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMORshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (MOVWloadidx ptr (SRAconst idx [c]) mem)
+       // match: (OR x (SRA y z))
        // cond:
-       // result: (MOVWloadshiftRA ptr idx [c] mem)
+       // result: (ORshiftRAreg x y z)
        for {
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSRAconst {
+               if v_1.Op != OpARMSRA {
                        break
                }
-               idx := v_1.Args[0]
-               c := v_1.AuxInt
-               mem := v.Args[2]
-               v.reset(OpARMMOVWloadshiftRA)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AuxInt = c
-               v.AddArg(mem)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMORshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (MOVWloadidx (SRAconst idx [c]) ptr mem)
+       // match: (OR (SRA y z) x)
        // cond:
-       // result: (MOVWloadshiftRA ptr idx [c] mem)
+       // result: (ORshiftRAreg x y z)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSRAconst {
+               if v_0.Op != OpARMSRA {
                        break
                }
-               idx := v_0.Args[0]
-               c := v_0.AuxInt
-               ptr := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVWloadshiftRA)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AuxInt = c
-               v.AddArg(mem)
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMORshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (OR x x)
+       // cond:
+       // result: x
+       for {
+               x := v.Args[0]
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _))
-       // cond: c==d && isSamePtr(ptr, ptr2)
+       // match: (ORconst [0] x)
+       // cond:
        // result: x
        for {
-               ptr := v.Args[0]
-               idx := v.Args[1]
-               c := v.AuxInt
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWstoreshiftLL {
+               if v.AuxInt != 0 {
                        break
                }
-               ptr2 := v_2.Args[0]
-               if idx != v_2.Args[1] {
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (ORconst [c] _)
+       // cond: int32(c)==-1
+       // result: (MOVWconst [-1])
+       for {
+               c := v.AuxInt
+               if !(int32(c) == -1) {
                        break
-               }
-               d := v_2.AuxInt
-               x := v_2.Args[2]
-               if !(c == d && isSamePtr(ptr, ptr2)) {
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = -1
+               return true
+       }
+       // match: (ORconst [c] (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [c|d])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c | d
                return true
        }
-       // match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem)
+       // match: (ORconst [c] (ORconst [d] x))
        // cond:
-       // result: (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
+       // result: (ORconst [c|d] x)
        for {
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMORconst {
                        break
                }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               mem := v.Args[2]
-               v.reset(OpARMMOVWload)
-               v.AuxInt = int64(uint32(c) << uint64(d))
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMORconst)
+               v.AuxInt = c | d
+               v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _))
-       // cond: c==d && isSamePtr(ptr, ptr2)
-       // result: x
+       // match: (ORshiftLL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (ORconst [c] (SLLconst <x.Type> x [d]))
        for {
-               ptr := v.Args[0]
-               idx := v.Args[1]
-               c := v.AuxInt
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWstoreshiftRA {
-                       break
-               }
-               ptr2 := v_2.Args[0]
-               if idx != v_2.Args[1] {
-                       break
-               }
-               d := v_2.AuxInt
-               x := v_2.Args[2]
-               if !(c == d && isSamePtr(ptr, ptr2)) {
+               d := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem)
+       // match: (ORshiftLL x (MOVWconst [c]) [d])
        // cond:
-       // result: (MOVWload [int64(int32(c)>>uint64(d))] ptr mem)
+       // result: (ORconst x [int64(uint32(c)<<uint64(d))])
        for {
-               ptr := v.Args[0]
+               d := v.AuxInt
+               x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
-               mem := v.Args[2]
-               v.reset(OpARMMOVWload)
-               v.AuxInt = int64(int32(c) >> uint64(d))
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARMORconst)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(x)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _))
-       // cond: c==d && isSamePtr(ptr, ptr2)
-       // result: x
+       // match: (ORshiftLL x y:(SLLconst x [c]) [d])
+       // cond: c==d
+       // result: y
        for {
-               ptr := v.Args[0]
-               idx := v.Args[1]
-               c := v.AuxInt
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWstoreshiftRL {
+               d := v.AuxInt
+               x := v.Args[0]
+               y := v.Args[1]
+               if y.Op != OpARMSLLconst {
                        break
                }
-               ptr2 := v_2.Args[0]
-               if idx != v_2.Args[1] {
+               c := y.AuxInt
+               if x != y.Args[0] {
                        break
                }
-               d := v_2.AuxInt
-               x := v_2.Args[2]
-               if !(c == d && isSamePtr(ptr, ptr2)) {
+               if !(c == d) {
                        break
                }
                v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               v.Type = y.Type
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem)
+       return false
+}
+func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORshiftLLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
+       // result: (ORconst [c] (SLL <x.Type> x y))
        for {
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               mem := v.Args[2]
-               v.reset(OpARMMOVWload)
-               v.AuxInt = int64(uint32(c) >> uint64(d))
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVWreg(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWreg x)
-       // cond: x.Uses == 1
-       // result: (MOVWnop x)
+       // match: (ORshiftLLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (ORshiftLL x y [c])
        for {
                x := v.Args[0]
-               if !(x.Uses == 1) {
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWnop)
+               c := v_2.AuxInt
+               v.reset(OpARMORshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWreg (MOVWconst [c]))
+       return false
+}
+func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORshiftRA (MOVWconst [c]) x [d])
        // cond:
-       // result: (MOVWconst [c])
+       // result: (ORconst [c] (SRAconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
+               x := v.Args[1]
+               v.reset(OpARMORconst)
                v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (ORshiftRA x (MOVWconst [c]) [d])
        // cond:
-       // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+       // result: (ORconst x [int64(int32(c)>>uint64(d))])
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
+               d := v.AuxInt
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpARMORconst)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (ORshiftRA x y:(SRAconst x [c]) [d])
+       // cond: c==d
+       // result: y
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWaddr {
+               d := v.AuxInt
+               x := v.Args[0]
+               y := v.Args[1]
+               if y.Op != OpARMSRAconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               c := y.AuxInt
+               if x != y.Args[0] {
                        break
                }
-               v.reset(OpARMMOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpCopy)
+               v.Type = y.Type
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
-       // cond: sym == nil && !config.nacl
-       // result: (MOVWstoreidx ptr idx val mem)
+       return false
+}
+func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORshiftRAreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (ORconst [c] (SRA <x.Type> x y))
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMADD {
-                       break
-               }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(sym == nil && !config.nacl) {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWstoreidx)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
-       // cond: sym == nil && !config.nacl
-       // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+       // match: (ORshiftRAreg x y (MOVWconst [c]))
+       // cond:
+       // result: (ORshiftRA x y [c])
        for {
-               if v.AuxInt != 0 {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               sym := v.Aux
+               c := v_2.AuxInt
+               v.reset(OpARMORshiftRA)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORshiftRL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+       for {
+               d := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMADDshiftLL {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
                c := v_0.AuxInt
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(sym == nil && !config.nacl) {
+               x := v.Args[1]
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (ORshiftRL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (ORconst x [int64(uint32(c)>>uint64(d))])
+       for {
+               d := v.AuxInt
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWstoreshiftLL)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AuxInt = c
-               v.AddArg(val)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               v.reset(OpARMORconst)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
-       // cond: sym == nil && !config.nacl
-       // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+       // match: (ORshiftRL x y:(SRLconst x [c]) [d])
+       // cond: c==d
+       // result: y
        for {
-               if v.AuxInt != 0 {
+               d := v.AuxInt
+               x := v.Args[0]
+               y := v.Args[1]
+               if y.Op != OpARMSRLconst {
                        break
                }
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDshiftRL {
+               c := y.AuxInt
+               if x != y.Args[0] {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
-               c := v_0.AuxInt
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(sym == nil && !config.nacl) {
+               if !(c == d) {
                        break
                }
-               v.reset(OpARMMOVWstoreshiftRL)
-               v.AddArg(ptr)
-               v.AddArg(idx)
-               v.AuxInt = c
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpCopy)
+               v.Type = y.Type
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
-       // cond: sym == nil && !config.nacl
-       // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+       return false
+}
+func rewriteValueARM_OpARMORshiftRLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ORshiftRLreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (ORconst [c] (SRL <x.Type> x y))
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
-               sym := v.Aux
                v_0 := v.Args[0]
-               if v_0.Op != OpARMADDshiftRA {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               ptr := v_0.Args[0]
-               idx := v_0.Args[1]
                c := v_0.AuxInt
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(sym == nil && !config.nacl) {
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMORconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (ORshiftRLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (ORshiftRL x y [c])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMMOVWstoreshiftRA)
-               v.AddArg(ptr)
-               v.AddArg(idx)
+               c := v_2.AuxInt
+               v.reset(OpARMORshiftRL)
                v.AuxInt = c
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWstoreidx ptr (MOVWconst [c]) val mem)
+       // match: (RSB (MOVWconst [c]) x)
        // cond:
-       // result: (MOVWstore [c] ptr val mem)
+       // result: (SUBconst [c] x)
        for {
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               c := v_1.AuxInt
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpARMMOVWstore)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMSUBconst)
                v.AuxInt = c
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstoreidx (MOVWconst [c]) ptr val mem)
+       // match: (RSB x (MOVWconst [c]))
        // cond:
-       // result: (MOVWstore [c] ptr val mem)
+       // result: (RSBconst [c] x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               c := v_0.AuxInt
-               ptr := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpARMMOVWstore)
+               c := v_1.AuxInt
+               v.reset(OpARMRSBconst)
                v.AuxInt = c
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem)
+       // match: (RSB x (SLLconst [c] y))
        // cond:
-       // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+       // result: (RSBshiftLL x y [c])
        for {
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMSLLconst {
                        break
                }
-               idx := v_1.Args[0]
                c := v_1.AuxInt
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpARMMOVWstoreshiftLL)
-               v.AddArg(ptr)
-               v.AddArg(idx)
+               y := v_1.Args[0]
+               v.reset(OpARMRSBshiftLL)
                v.AuxInt = c
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem)
+       // match: (RSB (SLLconst [c] y) x)
        // cond:
-       // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+       // result: (SUBshiftLL x y [c])
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMSLLconst {
-                       break
-               }
-               idx := v_0.Args[0]
-               c := v_0.AuxInt
-               ptr := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpARMMOVWstoreshiftLL)
-               v.AddArg(ptr)
-               v.AddArg(idx)
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMSUBshiftLL)
                v.AuxInt = c
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem)
+       // match: (RSB x (SRLconst [c] y))
        // cond:
-       // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+       // result: (RSBshiftRL x y [c])
        for {
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMSRLconst {
                        break
                }
-               idx := v_1.Args[0]
                c := v_1.AuxInt
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpARMMOVWstoreshiftRL)
-               v.AddArg(ptr)
-               v.AddArg(idx)
+               y := v_1.Args[0]
+               v.reset(OpARMRSBshiftRL)
                v.AuxInt = c
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem)
+       // match: (RSB (SRLconst [c] y) x)
        // cond:
-       // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+       // result: (SUBshiftRL x y [c])
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMSRLconst {
                        break
                }
-               idx := v_0.Args[0]
                c := v_0.AuxInt
-               ptr := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpARMMOVWstoreshiftRL)
-               v.AddArg(ptr)
-               v.AddArg(idx)
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMSUBshiftRL)
                v.AuxInt = c
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem)
+       // match: (RSB x (SRAconst [c] y))
        // cond:
-       // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+       // result: (RSBshiftRA x y [c])
        for {
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMSRAconst {
                        break
                }
-               idx := v_1.Args[0]
                c := v_1.AuxInt
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpARMMOVWstoreshiftRA)
-               v.AddArg(ptr)
-               v.AddArg(idx)
+               y := v_1.Args[0]
+               v.reset(OpARMRSBshiftRA)
                v.AuxInt = c
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem)
+       // match: (RSB (SRAconst [c] y) x)
        // cond:
-       // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+       // result: (SUBshiftRA x y [c])
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMSRAconst {
                        break
                }
-               idx := v_0.Args[0]
                c := v_0.AuxInt
-               ptr := v.Args[1]
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpARMMOVWstoreshiftRA)
-               v.AddArg(ptr)
-               v.AddArg(idx)
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMSUBshiftRA)
                v.AuxInt = c
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem)
+       // match: (RSB x (SLL y z))
        // cond:
-       // result: (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
+       // result: (RSBshiftLLreg x y z)
        for {
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMSLL {
                        break
                }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpARMMOVWstore)
-               v.AuxInt = int64(uint32(c) << uint64(d))
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMRSBshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem)
+       // match: (RSB (SLL y z) x)
        // cond:
-       // result: (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
+       // result: (SUBshiftLLreg x y z)
        for {
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLL {
                        break
                }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpARMMOVWstore)
-               v.AuxInt = int64(int32(c) >> uint64(d))
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMSUBshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem)
+       // match: (RSB x (SRL y z))
        // cond:
-       // result: (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
+       // result: (RSBshiftRLreg x y z)
        for {
-               ptr := v.Args[0]
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMSRL {
                        break
                }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               val := v.Args[2]
-               mem := v.Args[3]
-               v.reset(OpARMMOVWstore)
-               v.AuxInt = int64(uint32(c) >> uint64(d))
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMRSBshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MUL x (MOVWconst [c]))
-       // cond: int32(c) == -1
-       // result: (RSBconst [0] x)
+       // match: (RSB (SRL y z) x)
+       // cond:
+       // result: (SUBshiftRLreg x y z)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMSUBshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (RSB x (SRA y z))
+       // cond:
+       // result: (RSBshiftRAreg x y z)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMSRA {
                        break
                }
-               c := v_1.AuxInt
-               if !(int32(c) == -1) {
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMRSBshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               return true
+       }
+       // match: (RSB (SRA y z) x)
+       // cond:
+       // result: (SUBshiftRAreg x y z)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRA {
                        break
                }
-               v.reset(OpARMRSBconst)
-               v.AuxInt = 0
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMSUBshiftRAreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (MUL _ (MOVWconst [0]))
+       // match: (RSB x x)
        // cond:
        // result: (MOVWconst [0])
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_1.AuxInt != 0 {
+               x := v.Args[0]
+               if x != v.Args[1] {
                        break
                }
                v.reset(OpARMMOVWconst)
                v.AuxInt = 0
                return true
        }
-       // match: (MUL x (MOVWconst [1]))
+       return false
+}
+func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBSshiftLL (MOVWconst [c]) x [d])
        // cond:
-       // result: x
+       // result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_1.AuxInt != 1 {
+               d := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (MUL x (MOVWconst [c]))
-       // cond: isPowerOfTwo(c)
-       // result: (SLLconst [log2(c)] x)
+       // match: (RSBSshiftLL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (RSBSconst x [int64(uint32(c)<<uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               if !(isPowerOfTwo(c)) {
-                       break
-               }
-               v.reset(OpARMSLLconst)
-               v.AuxInt = log2(c)
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = int64(uint32(c) << uint64(d))
                v.AddArg(x)
                return true
        }
-       // match: (MUL x (MOVWconst [c]))
-       // cond: isPowerOfTwo(c-1) && int32(c) >= 3
-       // result: (ADDshiftLL x x [log2(c-1)])
+       return false
+}
+func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBSshiftLLreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (SUBSconst [c] (SLL <x.Type> x y))
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMADDshiftLL)
-               v.AddArg(x)
-               v.AddArg(x)
-               v.AuxInt = log2(c - 1)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (MUL x (MOVWconst [c]))
-       // cond: isPowerOfTwo(c+1) && int32(c) >= 7
-       // result: (RSBshiftLL x x [log2(c+1)])
+       // match: (RSBSshiftLLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (RSBSshiftLL x y [c])
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMRSBshiftLL)
-               v.AddArg(x)
+               c := v_2.AuxInt
+               v.reset(OpARMRSBSshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AuxInt = log2(c + 1)
+               v.AddArg(y)
                return true
        }
-       // match: (MUL x (MOVWconst [c]))
-       // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
-       // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+       return false
+}
+func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBSshiftRA (MOVWconst [c]) x [d])
+       // cond:
+       // result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+               d := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMSLLconst)
-               v.AuxInt = log2(c / 3)
-               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v0.AddArg(x)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AuxInt = d
                v0.AddArg(x)
-               v0.AuxInt = 1
                v.AddArg(v0)
                return true
        }
-       // match: (MUL x (MOVWconst [c]))
-       // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
-       // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+       // match: (RSBSshiftRA x (MOVWconst [c]) [d])
+       // cond:
+       // result: (RSBSconst x [int64(int32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBSshiftRAreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (SUBSconst [c] (SRA <x.Type> x y))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMSLLconst)
-               v.AuxInt = log2(c / 5)
-               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v0.AddArg(x)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
                v0.AddArg(x)
-               v0.AuxInt = 2
+               v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
-       // match: (MUL x (MOVWconst [c]))
-       // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
-       // result: (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+       // match: (RSBSshiftRAreg x y (MOVWconst [c]))
+       // cond:
+       // result: (RSBSshiftRA x y [c])
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               c := v_1.AuxInt
-               if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+               c := v_2.AuxInt
+               v.reset(OpARMRSBSshiftRA)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBSshiftRL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
+       for {
+               d := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMSLLconst)
-               v.AuxInt = log2(c / 7)
-               v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
-               v0.AddArg(x)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AuxInt = d
                v0.AddArg(x)
-               v0.AuxInt = 3
                v.AddArg(v0)
                return true
        }
-       // match: (MUL x (MOVWconst [c]))
-       // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
-       // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+       // match: (RSBSshiftRL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (RSBSconst x [int64(uint32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
-                       break
-               }
-               v.reset(OpARMSLLconst)
-               v.AuxInt = log2(c / 9)
-               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v0.AuxInt = 3
-               v.AddArg(v0)
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
-       // match: (MUL (MOVWconst [c]) x)
-       // cond: int32(c) == -1
-       // result: (RSBconst [0] x)
+       return false
+}
+func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBSshiftRLreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (SUBSconst [c] (SRL <x.Type> x y))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -9445,127 +9432,168 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               if !(int32(c) == -1) {
+               y := v.Args[2]
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBSshiftRLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (RSBSshiftRL x y [c])
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMRSBconst)
-               v.AuxInt = 0
+               c := v_2.AuxInt
+               v.reset(OpARMRSBSshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MUL (MOVWconst [0]) _)
+       return false
+}
+func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBconst [c] (MOVWconst [d]))
        // cond:
-       // result: (MOVWconst [0])
+       // result: (MOVWconst [int64(int32(c-d))])
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
-               if v_0.AuxInt != 0 {
-                       break
-               }
+               d := v_0.AuxInt
                v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               v.AuxInt = int64(int32(c - d))
                return true
        }
-       // match: (MUL (MOVWconst [1]) x)
+       // match: (RSBconst [c] (RSBconst [d] x))
        // cond:
-       // result: x
+       // result: (ADDconst [int64(int32(c-d))] x)
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_0.AuxInt != 1 {
+               if v_0.Op != OpARMRSBconst {
                        break
                }
-               x := v.Args[1]
-               v.reset(OpCopy)
-               v.Type = x.Type
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMADDconst)
+               v.AuxInt = int64(int32(c - d))
                v.AddArg(x)
                return true
        }
-       // match: (MUL (MOVWconst [c]) x)
-       // cond: isPowerOfTwo(c)
-       // result: (SLLconst [log2(c)] x)
+       // match: (RSBconst [c] (ADDconst [d] x))
+       // cond:
+       // result: (RSBconst [int64(int32(c-d))] x)
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               if !(isPowerOfTwo(c)) {
+               if v_0.Op != OpARMADDconst {
                        break
                }
-               v.reset(OpARMSLLconst)
-               v.AuxInt = log2(c)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = int64(int32(c - d))
                v.AddArg(x)
                return true
        }
-       // match: (MUL (MOVWconst [c]) x)
-       // cond: isPowerOfTwo(c-1) && int32(c) >= 3
-       // result: (ADDshiftLL x x [log2(c-1)])
+       // match: (RSBconst [c] (SUBconst [d] x))
+       // cond:
+       // result: (RSBconst [int64(int32(c+d))] x)
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+               if v_0.Op != OpARMSUBconst {
                        break
                }
-               v.reset(OpARMADDshiftLL)
-               v.AddArg(x)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = int64(int32(c + d))
                v.AddArg(x)
-               v.AuxInt = log2(c - 1)
                return true
        }
-       // match: (MUL (MOVWconst [c]) x)
-       // cond: isPowerOfTwo(c+1) && int32(c) >= 7
-       // result: (RSBshiftLL x x [log2(c+1)])
+       return false
+}
+func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBshiftLL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (SUBconst [c] (SLLconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (RSBshiftLL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (RSBconst x [int64(uint32(c)<<uint64(d))])
+       for {
+               d := v.AuxInt
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMRSBshiftLL)
-               v.AddArg(x)
+               c := v_1.AuxInt
+               v.reset(OpARMRSBconst)
+               v.AuxInt = int64(uint32(c) << uint64(d))
                v.AddArg(x)
-               v.AuxInt = log2(c + 1)
                return true
        }
-       // match: (MUL (MOVWconst [c]) x)
-       // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
-       // result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
+       // match: (RSBshiftLL x (SLLconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               d := v.AuxInt
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+               c := v_1.AuxInt
+               if x != v_1.Args[0] {
                        break
                }
-               v.reset(OpARMSLLconst)
-               v.AuxInt = log2(c / 3)
-               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v0.AuxInt = 1
-               v.AddArg(v0)
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MUL (MOVWconst [c]) x)
-       // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
-       // result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
+       return false
+}
+func rewriteValueARM_OpARMRSBshiftLLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBshiftLLreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (SUBconst [c] (SLL <x.Type> x y))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -9573,314 +9601,329 @@ func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
-                       break
-               }
-               v.reset(OpARMSLLconst)
-               v.AuxInt = log2(c / 5)
-               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v0.AddArg(x)
+               y := v.Args[2]
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
                v0.AddArg(x)
-               v0.AuxInt = 2
+               v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
-       // match: (MUL (MOVWconst [c]) x)
-       // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
-       // result: (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
+       // match: (RSBshiftLLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (RSBshiftLL x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMSLLconst)
-               v.AuxInt = log2(c / 7)
-               v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v0.AuxInt = 3
-               v.AddArg(v0)
+               c := v_2.AuxInt
+               v.reset(OpARMRSBshiftLL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (MUL (MOVWconst [c]) x)
-       // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
-       // result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
+       return false
+}
+func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBshiftRA (MOVWconst [c]) x [d])
+       // cond:
+       // result: (SUBconst [c] (SRAconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
-                       break
-               }
-               v.reset(OpARMSLLconst)
-               v.AuxInt = log2(c / 9)
-               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v0.AddArg(x)
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AuxInt = d
                v0.AddArg(x)
-               v0.AuxInt = 3
                v.AddArg(v0)
                return true
        }
-       // match: (MUL (MOVWconst [c]) (MOVWconst [d]))
+       // match: (RSBshiftRA x (MOVWconst [c]) [d])
        // cond:
-       // result: (MOVWconst [int64(int32(c*d))])
+       // result: (RSBconst x [int64(int32(c)>>uint64(d))])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
+               d := v.AuxInt
+               x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
-               d := v_1.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int32(c * d))
+               c := v_1.AuxInt
+               v.reset(OpARMRSBconst)
+               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MULA x (MOVWconst [c]) a)
-       // cond: int32(c) == -1
-       // result: (SUB a x)
+       // match: (RSBshiftRA x (SRAconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMSRAconst {
                        break
                }
                c := v_1.AuxInt
-               a := v.Args[2]
-               if !(int32(c) == -1) {
+               if x != v_1.Args[0] {
                        break
                }
-               v.reset(OpARMSUB)
-               v.AddArg(a)
-               v.AddArg(x)
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MULA _ (MOVWconst [0]) a)
+       return false
+}
+func rewriteValueARM_OpARMRSBshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBshiftRAreg (MOVWconst [c]) x y)
        // cond:
-       // result: a
-       for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_1.AuxInt != 0 {
+       // result: (SUBconst [c] (SRA <x.Type> x y))
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               a := v.Args[2]
-               v.reset(OpCopy)
-               v.Type = a.Type
-               v.AddArg(a)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (MULA x (MOVWconst [1]) a)
+       // match: (RSBshiftRAreg x y (MOVWconst [c]))
        // cond:
-       // result: (ADD x a)
+       // result: (RSBshiftRA x y [c])
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_1.AuxInt != 1 {
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               a := v.Args[2]
-               v.reset(OpARMADD)
+               c := v_2.AuxInt
+               v.reset(OpARMRSBshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AddArg(a)
+               v.AddArg(y)
                return true
        }
-       // match: (MULA x (MOVWconst [c]) a)
-       // cond: isPowerOfTwo(c)
-       // result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
+       return false
+}
+func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBshiftRL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (SUBconst [c] (SRLconst <x.Type> x [d]))
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               a := v.Args[2]
-               if !(isPowerOfTwo(c)) {
+               d := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AuxInt = log2(c)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AuxInt = d
                v0.AddArg(x)
                v.AddArg(v0)
-               v.AddArg(a)
                return true
        }
-       // match: (MULA x (MOVWconst [c]) a)
-       // cond: isPowerOfTwo(c-1) && int32(c) >= 3
-       // result: (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+       // match: (RSBshiftRL x (MOVWconst [c]) [d])
+       // cond:
+       // result: (RSBconst x [int64(uint32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               a := v.Args[2]
-               if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
-                       break
-               }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v0.AuxInt = log2(c - 1)
-               v.AddArg(v0)
-               v.AddArg(a)
+               v.reset(OpARMRSBconst)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
-       // match: (MULA x (MOVWconst [c]) a)
-       // cond: isPowerOfTwo(c+1) && int32(c) >= 7
-       // result: (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+       // match: (RSBshiftRL x (SRLconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMSRLconst {
                        break
                }
                c := v_1.AuxInt
-               a := v.Args[2]
-               if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+               if x != v_1.Args[0] {
                        break
                }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(x)
-               v0.AuxInt = log2(c + 1)
-               v.AddArg(v0)
-               v.AddArg(a)
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (MULA x (MOVWconst [c]) a)
-       // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
-       // result: (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+       return false
+}
+func rewriteValueARM_OpARMRSBshiftRLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSBshiftRLreg (MOVWconst [c]) x y)
+       // cond:
+       // result: (SUBconst [c] (SRL <x.Type> x y))
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               a := v.Args[2]
-               if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AuxInt = log2(c / 3)
-               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v1.AddArg(x)
-               v1.AddArg(x)
-               v1.AuxInt = 1
-               v0.AddArg(v1)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AddArg(a)
                return true
        }
-       // match: (MULA x (MOVWconst [c]) a)
-       // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
-       // result: (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+       // match: (RSBshiftRLreg x y (MOVWconst [c]))
+       // cond:
+       // result: (RSBshiftRL x y [c])
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               c := v_1.AuxInt
-               a := v.Args[2]
-               if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+               c := v_2.AuxInt
+               v.reset(OpARMRSBshiftRL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSCconst [c] (ADDconst [d] x) flags)
+       // cond:
+       // result: (RSCconst [int64(int32(c-d))] x flags)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
                        break
                }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AuxInt = log2(c / 5)
-               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v1.AddArg(x)
-               v1.AddArg(x)
-               v1.AuxInt = 2
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v.AddArg(a)
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               flags := v.Args[1]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = int64(int32(c - d))
+               v.AddArg(x)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA x (MOVWconst [c]) a)
-       // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
-       // result: (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+       // match: (RSCconst [c] (SUBconst [d] x) flags)
+       // cond:
+       // result: (RSCconst [int64(int32(c+d))] x flags)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSUBconst {
                        break
                }
-               c := v_1.AuxInt
-               a := v.Args[2]
-               if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               flags := v.Args[1]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = int64(int32(c + d))
+               v.AddArg(x)
+               v.AddArg(flags)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSCshiftLL (MOVWconst [c]) x [d] flags)
+       // cond:
+       // result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+       for {
+               d := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMADD)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AuxInt = log2(c / 7)
-               v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
-               v1.AddArg(x)
-               v1.AddArg(x)
-               v1.AuxInt = 3
-               v0.AddArg(v1)
+               v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
-               v.AddArg(a)
+               v.AddArg(flags)
                return true
-       }
-       // match: (MULA x (MOVWconst [c]) a)
-       // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
-       // result: (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+       }
+       // match: (RSCshiftLL x (MOVWconst [c]) [d] flags)
+       // cond:
+       // result: (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               a := v.Args[2]
-               if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
-                       break
-               }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AuxInt = log2(c / 9)
-               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v1.AddArg(x)
-               v1.AddArg(x)
-               v1.AuxInt = 3
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v.AddArg(a)
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(x)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [c]) x a)
-       // cond: int32(c) == -1
-       // result: (SUB a x)
+       return false
+}
+func rewriteValueARM_OpARMRSCshiftLLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSCshiftLLreg (MOVWconst [c]) x y flags)
+       // cond:
+       // result: (SBCconst [c] (SLL <x.Type> x y) flags)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -9888,53 +9931,88 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               a := v.Args[2]
-               if !(int32(c) == -1) {
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
+       // cond:
+       // result: (RSCshiftLL x y [c] flags)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMSUB)
-               v.AddArg(a)
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMRSCshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [0]) _ a)
+       return false
+}
+func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSCshiftRA (MOVWconst [c]) x [d] flags)
        // cond:
-       // result: a
+       // result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
-               if v_0.AuxInt != 0 {
-                       break
-               }
-               a := v.Args[2]
-               v.reset(OpCopy)
-               v.Type = a.Type
-               v.AddArg(a)
+               c := v_0.AuxInt
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [1]) x a)
+       // match: (RSCshiftRA x (MOVWconst [c]) [d] flags)
        // cond:
-       // result: (ADD x a)
+       // result: (RSCconst x [int64(int32(c)>>uint64(d))] flags)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               if v_0.AuxInt != 1 {
+               d := v.AuxInt
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               x := v.Args[1]
-               a := v.Args[2]
-               v.reset(OpARMADD)
+               c := v_1.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = int64(int32(c) >> uint64(d))
                v.AddArg(x)
-               v.AddArg(a)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [c]) x a)
-       // cond: isPowerOfTwo(c)
-       // result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
+       return false
+}
+func rewriteValueARM_OpARMRSCshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSCshiftRAreg (MOVWconst [c]) x y flags)
+       // cond:
+       // result: (SBCconst [c] (SRA <x.Type> x y) flags)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -9942,44 +10020,88 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               a := v.Args[2]
-               if !(isPowerOfTwo(c)) {
-                       break
-               }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AuxInt = log2(c)
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
                v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AddArg(a)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [c]) x a)
-       // cond: isPowerOfTwo(c-1) && int32(c) >= 3
-       // result: (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
+       // match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
+       // cond:
+       // result: (RSCshiftRA x y [c] flags)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMRSCshiftRA)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSCshiftRL (MOVWconst [c]) x [d] flags)
+       // cond:
+       // result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               a := v.Args[2]
-               if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
-                       break
-               }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v0.AddArg(x)
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AuxInt = d
                v0.AddArg(x)
-               v0.AuxInt = log2(c - 1)
                v.AddArg(v0)
-               v.AddArg(a)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [c]) x a)
-       // cond: isPowerOfTwo(c+1) && int32(c) >= 7
-       // result: (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
+       // match: (RSCshiftRL x (MOVWconst [c]) [d] flags)
+       // cond:
+       // result: (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
+       for {
+               d := v.AuxInt
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(x)
+               v.AddArg(flags)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMRSCshiftRLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (RSCshiftRLreg (MOVWconst [c]) x y flags)
+       // cond:
+       // result: (SBCconst [c] (SRL <x.Type> x y) flags)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -9987,22 +10109,44 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               a := v.Args[2]
-               if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
-                       break
-               }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
-               v0.AddArg(x)
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
                v0.AddArg(x)
-               v0.AuxInt = log2(c + 1)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AddArg(a)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [c]) x a)
-       // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
-       // result: (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
+       // match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
+       // cond:
+       // result: (RSCshiftRL x y [c] flags)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMRSCshiftRL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
+               return true
+       }
+       return false
+}
+func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SBC (MOVWconst [c]) x flags)
+       // cond:
+       // result: (RSCconst [c] x flags)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -10010,1361 +10154,1499 @@ func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               a := v.Args[2]
-               if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
-                       break
-               }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AuxInt = log2(c / 3)
-               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v1.AddArg(x)
-               v1.AddArg(x)
-               v1.AuxInt = 1
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v.AddArg(a)
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [c]) x a)
-       // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
-       // result: (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
+       // match: (SBC x (MOVWconst [c]) flags)
+       // cond:
+       // result: (SBCconst [c] x flags)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               a := v.Args[2]
-               if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+               c := v_1.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (SBC x (SLLconst [c] y) flags)
+       // cond:
+       // result: (SBCshiftLL x y [c] flags)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
                        break
                }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AuxInt = log2(c / 5)
-               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v1.AddArg(x)
-               v1.AddArg(x)
-               v1.AuxInt = 2
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v.AddArg(a)
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               flags := v.Args[2]
+               v.reset(OpARMSBCshiftLL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [c]) x a)
-       // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
-       // result: (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
+       // match: (SBC (SLLconst [c] y) x flags)
+       // cond:
+       // result: (RSCshiftLL x y [c] flags)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSLLconst {
                        break
                }
                c := v_0.AuxInt
+               y := v_0.Args[0]
                x := v.Args[1]
-               a := v.Args[2]
-               if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+               flags := v.Args[2]
+               v.reset(OpARMRSCshiftLL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (SBC x (SRLconst [c] y) flags)
+       // cond:
+       // result: (SBCshiftRL x y [c] flags)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AuxInt = log2(c / 7)
-               v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
-               v1.AddArg(x)
-               v1.AddArg(x)
-               v1.AuxInt = 3
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v.AddArg(a)
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               flags := v.Args[2]
+               v.reset(OpARMSBCshiftRL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [c]) x a)
-       // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
-       // result: (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
+       // match: (SBC (SRLconst [c] y) x flags)
+       // cond:
+       // result: (RSCshiftRL x y [c] flags)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSRLconst {
                        break
                }
                c := v_0.AuxInt
+               y := v_0.Args[0]
                x := v.Args[1]
-               a := v.Args[2]
-               if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+               flags := v.Args[2]
+               v.reset(OpARMRSCshiftRL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (SBC x (SRAconst [c] y) flags)
+       // cond:
+       // result: (SBCshiftRA x y [c] flags)
+       for {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
                        break
                }
-               v.reset(OpARMADD)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AuxInt = log2(c / 9)
-               v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-               v1.AddArg(x)
-               v1.AddArg(x)
-               v1.AuxInt = 3
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v.AddArg(a)
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               flags := v.Args[2]
+               v.reset(OpARMSBCshiftRA)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
                return true
        }
-       // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a)
+       // match: (SBC (SRAconst [c] y) x flags)
        // cond:
-       // result: (ADDconst [int64(int32(c*d))] a)
+       // result: (RSCshiftRA x y [c] flags)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSRAconst {
                        break
                }
                c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMRSCshiftRA)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (SBC x (SLL y z) flags)
+       // cond:
+       // result: (SBCshiftLLreg x y z flags)
+       for {
+               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMSLL {
                        break
                }
-               d := v_1.AuxInt
-               a := v.Args[2]
-               v.reset(OpARMADDconst)
-               v.AuxInt = int64(int32(c * d))
-               v.AddArg(a)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMSBCshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MVN (MOVWconst [c]))
+       // match: (SBC (SLL y z) x flags)
        // cond:
-       // result: (MOVWconst [^c])
+       // result: (RSCshiftLLreg x y z flags)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSLL {
                        break
                }
-               c := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = ^c
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMRSCshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       // match: (MVN (SLLconst [c] x))
+       // match: (SBC x (SRL y z) flags)
        // cond:
-       // result: (MVNshiftLL x [c])
+       // result: (SBCshiftRLreg x y z flags)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSLLconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
                        break
                }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMMVNshiftLL)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMSBCshiftRLreg)
                v.AddArg(x)
-               v.AuxInt = c
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       // match: (MVN (SRLconst [c] x))
+       // match: (SBC (SRL y z) x flags)
        // cond:
-       // result: (MVNshiftRL x [c])
+       // result: (RSCshiftRLreg x y z flags)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSRLconst {
+               if v_0.Op != OpARMSRL {
                        break
                }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMMVNshiftRL)
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMRSCshiftRLreg)
                v.AddArg(x)
-               v.AuxInt = c
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       // match: (MVN (SRAconst [c] x))
+       // match: (SBC x (SRA y z) flags)
        // cond:
-       // result: (MVNshiftRA x [c])
+       // result: (SBCshiftRAreg x y z flags)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRAconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
                        break
                }
-               c := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMMVNshiftRA)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMSBCshiftRAreg)
                v.AddArg(x)
-               v.AuxInt = c
+               v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       // match: (MVN (SLL x y))
+       // match: (SBC (SRA y z) x flags)
        // cond:
-       // result: (MVNshiftLLreg x y)
+       // result: (RSCshiftRAreg x y z flags)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSLL {
+               if v_0.Op != OpARMSRA {
                        break
                }
-               x := v_0.Args[0]
-               y := v_0.Args[1]
-               v.reset(OpARMMVNshiftLLreg)
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMRSCshiftRAreg)
                v.AddArg(x)
                v.AddArg(y)
+               v.AddArg(z)
+               v.AddArg(flags)
                return true
        }
-       // match: (MVN (SRL x y))
+       return false
+}
+func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SBCconst [c] (ADDconst [d] x) flags)
        // cond:
-       // result: (MVNshiftRLreg x y)
+       // result: (SBCconst [int64(int32(c-d))] x flags)
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSRL {
+               if v_0.Op != OpARMADDconst {
                        break
                }
+               d := v_0.AuxInt
                x := v_0.Args[0]
-               y := v_0.Args[1]
-               v.reset(OpARMMVNshiftRLreg)
+               flags := v.Args[1]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = int64(int32(c - d))
                v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(flags)
                return true
        }
-       // match: (MVN (SRA x y))
+       // match: (SBCconst [c] (SUBconst [d] x) flags)
        // cond:
-       // result: (MVNshiftRAreg x y)
+       // result: (SBCconst [int64(int32(c+d))] x flags)
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSRA {
+               if v_0.Op != OpARMSUBconst {
                        break
                }
+               d := v_0.AuxInt
                x := v_0.Args[0]
-               y := v_0.Args[1]
-               v.reset(OpARMMVNshiftRAreg)
+               flags := v.Args[1]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = int64(int32(c + d))
                v.AddArg(x)
-               v.AddArg(y)
+               v.AddArg(flags)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMVNshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MVNshiftLL (MOVWconst [c]) [d])
+       // match: (SBCshiftLL (MOVWconst [c]) x [d] flags)
        // cond:
-       // result: (MOVWconst [^int64(uint32(c)<<uint64(d))])
+       // result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = ^int64(uint32(c) << uint64(d))
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMVNshiftLLreg(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MVNshiftLLreg x (MOVWconst [c]))
+       // match: (SBCshiftLL x (MOVWconst [c]) [d] flags)
        // cond:
-       // result: (MVNshiftLL x [c])
+       // result: (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               v.reset(OpARMMVNshiftLL)
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = int64(uint32(c) << uint64(d))
                v.AddArg(x)
-               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMVNshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MVNshiftRA (MOVWconst [c]) [d])
+       // match: (SBCshiftLLreg (MOVWconst [c]) x y flags)
        // cond:
-       // result: (MOVWconst [^int64(int32(c)>>uint64(d))])
+       // result: (RSCconst [c] (SLL <x.Type> x y) flags)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = ^int64(int32(c) >> uint64(d))
+               x := v.Args[1]
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
+       // cond:
+       // result: (SBCshiftLL x y [c] flags)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMSBCshiftLL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMVNshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MVNshiftRAreg x (MOVWconst [c]))
+       // match: (SBCshiftRA (MOVWconst [c]) x [d] flags)
        // cond:
-       // result: (MVNshiftRA x [c])
+       // result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+       for {
+               d := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (SBCshiftRA x (MOVWconst [c]) [d] flags)
+       // cond:
+       // result: (SBCconst x [int64(int32(c)>>uint64(d))] flags)
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               v.reset(OpARMMVNshiftRA)
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = int64(int32(c) >> uint64(d))
                v.AddArg(x)
-               v.AuxInt = c
+               v.AddArg(flags)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMMVNshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftRAreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MVNshiftRL (MOVWconst [c]) [d])
+       // match: (SBCshiftRAreg (MOVWconst [c]) x y flags)
        // cond:
-       // result: (MOVWconst [^int64(uint32(c)>>uint64(d))])
+       // result: (RSCconst [c] (SRA <x.Type> x y) flags)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = ^int64(uint32(c) >> uint64(d))
+               x := v.Args[1]
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v.AddArg(flags)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMMVNshiftRLreg(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MVNshiftRLreg x (MOVWconst [c]))
+       // match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
        // cond:
-       // result: (MVNshiftRL x [c])
+       // result: (SBCshiftRA x y [c] flags)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpARMMVNshiftRL)
-               v.AddArg(x)
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMSBCshiftRA)
                v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(flags)
                return true
        }
        return false
 }
-func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod16 x y)
+       // match: (SBCshiftRL (MOVWconst [c]) x [d] flags)
        // cond:
-       // result: (MOD (SignExt16to32 x) (SignExt16to32 y))
+       // result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMOD)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               d := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               flags := v.Args[2]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AuxInt = d
                v0.AddArg(x)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v.AddArg(flags)
+               return true
+       }
+       // match: (SBCshiftRL x (MOVWconst [c]) [d] flags)
+       // cond:
+       // result: (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
+       for {
+               d := v.AuxInt
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               flags := v.Args[2]
+               v.reset(OpARMSBCconst)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(x)
+               v.AddArg(flags)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSBCshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod16u x y)
+       // match: (SBCshiftRLreg (MOVWconst [c]) x y flags)
        // cond:
-       // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y))
+       // result: (RSCconst [c] (SRL <x.Type> x y) flags)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMODU)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               flags := v.Args[3]
+               v.reset(OpARMRSCconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
                v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v.AddArg(flags)
                return true
        }
-}
-func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mod32 x y)
+       // match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
        // cond:
-       // result: (MOD x y)
+       // result: (SBCshiftRL x y [c] flags)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMMOD)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               flags := v.Args[3]
+               v.reset(OpARMSBCshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
+               v.AddArg(flags)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod32u x y)
+       // match: (SLL x (MOVWconst [c]))
        // cond:
-       // result: (MODU x y)
+       // result: (SLLconst x [c&31])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMODU)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMSLLconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSLLconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod8 x y)
+       // match: (SLLconst [c] (MOVWconst [d]))
        // cond:
-       // result: (MOD (SignExt8to32 x) (SignExt8to32 y))
+       // result: (MOVWconst [int64(uint32(d)<<uint64(c))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMOD)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(uint32(d) << uint64(c))
                return true
        }
+       return false
 }
-func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mod8u x y)
+       // match: (SRA x (MOVWconst [c]))
        // cond:
-       // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y))
+       // result: (SRAconst x [c&31])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMODU)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMSRAconst)
+               v.AuxInt = c & 31
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpMove(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Move [s] _ _ mem)
-       // cond: SizeAndAlign(s).Size() == 0
-       // result: mem
-       for {
-               s := v.AuxInt
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 0) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = mem.Type
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 1
-       // result: (MOVBstore dst (MOVBUload src mem) mem)
-       for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 1) {
-                       break
-               }
-               v.reset(OpARMMOVBstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore dst (MOVHUload src mem) mem)
-       for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
-                       break
-               }
-               v.reset(OpARMMOVHstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 2
-       // result: (MOVBstore [1] dst (MOVBUload [1] src mem)           (MOVBstore dst (MOVBUload src mem) mem))
-       for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 2) {
-                       break
-               }
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = 1
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v0.AuxInt = 1
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
-               return true
-       }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-       // result: (MOVWstore dst (MOVWload src mem) mem)
+       // match: (SRAcond x _ (FlagEQ))
+       // cond:
+       // result: (SRAconst x [31])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
+               x := v.Args[0]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMFlagEQ {
                        break
                }
-               v.reset(OpARMMOVWstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               v.reset(OpARMSRAconst)
+               v.AuxInt = 31
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore [2] dst (MOVHUload [2] src mem)           (MOVHstore dst (MOVHUload src mem) mem))
+       // match: (SRAcond x y (FlagLT_ULT))
+       // cond:
+       // result: (SRA x y)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
-                       break
-               }
-               v.reset(OpARMMOVHstore)
-               v.AuxInt = 2
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
-               v0.AuxInt = 2
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMFlagLT_ULT {
+                       break
+               }
+               v.reset(OpARMSRA)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4
-       // result: (MOVBstore [3] dst (MOVBUload [3] src mem)           (MOVBstore [2] dst (MOVBUload [2] src mem)                      (MOVBstore [1] dst (MOVBUload [1] src mem)                              (MOVBstore dst (MOVBUload src mem) mem))))
+       // match: (SRAcond x _ (FlagLT_UGT))
+       // cond:
+       // result: (SRAconst x [31])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 4) {
+               x := v.Args[0]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMFlagLT_UGT {
                        break
                }
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = 3
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v0.AuxInt = 3
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-               v1.AuxInt = 2
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v2.AuxInt = 2
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-               v3.AuxInt = 1
-               v3.AddArg(dst)
-               v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v4.AuxInt = 1
-               v4.AddArg(src)
-               v4.AddArg(mem)
-               v3.AddArg(v4)
-               v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-               v5.AddArg(dst)
-               v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v6.AddArg(src)
-               v6.AddArg(mem)
-               v5.AddArg(v6)
-               v5.AddArg(mem)
-               v3.AddArg(v5)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               v.reset(OpARMSRAconst)
+               v.AuxInt = 31
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 3
-       // result: (MOVBstore [2] dst (MOVBUload [2] src mem)           (MOVBstore [1] dst (MOVBUload [1] src mem)                      (MOVBstore dst (MOVBUload src mem) mem)))
+       // match: (SRAcond x y (FlagGT_ULT))
+       // cond:
+       // result: (SRA x y)
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 3) {
+               x := v.Args[0]
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMFlagGT_ULT {
                        break
                }
-               v.reset(OpARMMOVBstore)
-               v.AuxInt = 2
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v0.AuxInt = 2
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-               v1.AuxInt = 1
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v2.AuxInt = 1
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-               v3.AddArg(dst)
-               v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-               v4.AddArg(src)
-               v4.AddArg(mem)
-               v3.AddArg(v4)
-               v3.AddArg(mem)
-               v1.AddArg(v3)
-               v.AddArg(v1)
+               v.reset(OpARMSRA)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512   && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
-       // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
+       // match: (SRAcond x _ (FlagGT_UGT))
+       // cond:
+       // result: (SRAconst x [31])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
+               x := v.Args[0]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMFlagGT_UGT {
                        break
                }
-               v.reset(OpARMDUFFCOPY)
-               v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4))
-               v.AddArg(dst)
-               v.AddArg(src)
-               v.AddArg(mem)
+               v.reset(OpARMSRAconst)
+               v.AuxInt = 31
+               v.AddArg(x)
                return true
        }
-       // match: (Move [s] dst src mem)
-       // cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0
-       // result: (LoweredMove [SizeAndAlign(s).Align()]               dst             src             (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)])            mem)
+       return false
+}
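
The five SRAcond rules above cover every constant flag value: when the flags record unsigned-less-than (FlagLT_ULT, FlagGT_ULT) the conditional shift keeps its variable amount and becomes a plain SRA, and in every other case it collapses to SRAconst [31], leaving only copies of the sign bit. A plain-Go model of that behavior, as I read the rules (illustrative, not the compiler's definition):

package main

import "fmt"

// sraCond models what the SRAcond rules imply: use the variable shift when
// the recorded comparison was unsigned-less-than, otherwise shift by 31,
// which replicates the sign bit across the whole word.
func sraCond(x int32, y uint, ult bool) int32 {
	if ult {
		return x >> y
	}
	return x >> 31
}

func main() {
	fmt.Println(sraCond(-64, 3, true))  // -8
	fmt.Println(sraCond(-64, 3, false)) // -1
}
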
+func rewriteValueARM_OpARMSRAconst(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SRAconst [c] (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [int64(int32(d)>>uint64(c))])
        for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0) {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpARMLoweredMove)
-               v.AuxInt = SizeAndAlign(s).Align()
-               v.AddArg(dst)
-               v.AddArg(src)
-               v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
-               v0.AddArg(src)
-               v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-               v.AddArg(v0)
-               v.AddArg(mem)
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(int32(d) >> uint64(c))
                return true
        }
        return false
 }
-func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul16 x y)
+       // match: (SRL x (MOVWconst [c]))
        // cond:
-       // result: (MUL x y)
+       // result: (SRLconst x [c&31])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMUL)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMSRLconst)
+               v.AuxInt = c & 31
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSRLconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul32 x y)
+       // match: (SRLconst [c] (MOVWconst [d]))
        // cond:
-       // result: (MUL x y)
+       // result: (MOVWconst [int64(uint32(d)>>uint64(c))])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMUL)
-               v.AddArg(x)
-               v.AddArg(y)
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = int64(uint32(d) >> uint64(c))
                return true
        }
+       return false
 }
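
The SLLconst/SRAconst/SRLconst constant folds above share one pattern: narrow the 64-bit AuxInt to the 32 bits it represents, shift with 32-bit semantics, and store the result back as an int64. The int32 vs uint32 conversion is exactly what separates the arithmetic fold from the logical one. The expressions below are copied from the rules; the sample values are made up:

package main

import "fmt"

func main() {
	d, c := int64(-8), int64(1) // pretend AuxInts: value -8, shift amount 1

	// SRAconst fold: int64(int32(d) >> uint64(c)) — arithmetic, keeps the sign.
	fmt.Println(int64(int32(d) >> uint64(c))) // -4

	// SRLconst fold: int64(uint32(d) >> uint64(c)) — logical, zero-fills.
	fmt.Println(int64(uint32(d) >> uint64(c))) // 2147483644

	// SLLconst fold: int64(uint32(d) << uint64(c)) — truncates to 32 bits.
	fmt.Println(int64(uint32(d) << uint64(c))) // 4294967280
}
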
-func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul32F x y)
+       // match: (SUB (MOVWconst [c]) x)
        // cond:
-       // result: (MULF x y)
+       // result: (RSBconst [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = c
+               v.AddArg(x)
+               return true
+       }
+       // match: (SUB x (MOVWconst [c]))
+       // cond:
+       // result: (SUBconst [c] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMULF)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMSUBconst)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mul32uhilo x y)
+       // match: (SUB x (SLLconst [c] y))
        // cond:
-       // result: (MULLU x y)
+       // result: (SUBshiftLL x y [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMULLU)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMSUBshiftLL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+       // match: (SUB (SLLconst [c] y) x)
+       // cond:
+       // result: (RSBshiftLL x y [c])
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMRSBshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mul64F x y)
+       // match: (SUB x (SRLconst [c] y))
        // cond:
-       // result: (MULD x y)
+       // result: (SUBshiftRL x y [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMULD)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMSUBshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Mul8 x y)
+       // match: (SUB (SRLconst [c] y) x)
        // cond:
-       // result: (MUL x y)
+       // result: (RSBshiftRL x y [c])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMMUL)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMRSBshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neg16 x)
+       // match: (SUB x (SRAconst [c] y))
        // cond:
-       // result: (RSBconst [0] x)
+       // result: (SUBshiftRA x y [c])
        for {
                x := v.Args[0]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = 0
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMSUBshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neg32 x)
+       // match: (SUB (SRAconst [c] y) x)
        // cond:
-       // result: (RSBconst [0] x)
+       // result: (RSBshiftRA x y [c])
        for {
-               x := v.Args[0]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = 0
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMRSBshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neg32F x)
+       // match: (SUB x (SLL y z))
        // cond:
-       // result: (NEGF x)
+       // result: (SUBshiftLLreg x y z)
        for {
                x := v.Args[0]
-               v.reset(OpARMNEGF)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMSUBshiftLLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neg64F x)
+       // match: (SUB (SLL y z) x)
        // cond:
-       // result: (NEGD x)
+       // result: (RSBshiftLLreg x y z)
        for {
-               x := v.Args[0]
-               v.reset(OpARMNEGD)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMRSBshiftLLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neg8 x)
+       // match: (SUB x (SRL y z))
        // cond:
-       // result: (RSBconst [0] x)
+       // result: (SUBshiftRLreg x y z)
        for {
                x := v.Args[0]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = 0
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMSUBshiftRLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq16 x y)
+       // match: (SUB (SRL y z) x)
        // cond:
-       // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (RSBshiftRLreg x y z)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRL {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMRSBshiftRLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq32 x y)
+       // match: (SUB x (SRA y z))
        // cond:
-       // result: (NotEqual (CMP x y))
+       // result: (SUBshiftRAreg x y z)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRA {
+                       break
+               }
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMSUBshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq32F x y)
+       // match: (SUB (SRA y z) x)
        // cond:
-       // result: (NotEqual (CMPF x y))
+       // result: (RSBshiftRAreg x y z)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRA {
+                       break
+               }
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMRSBshiftRAreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-}
-func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq64F x y)
+       // match: (SUB x x)
        // cond:
-       // result: (NotEqual (CMPD x y))
+       // result: (MOVWconst [0])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               if x != v.Args[1] {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
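
SUB is not commutative, so the rules above match each shifted-operand shape in both argument positions: a shifted second operand lowers to SUBshiftLL/RL/RA, while a shifted first operand lowers to the RSB (reverse subtract) counterpart, ARM's operand2 - operand1 form; the final rule folds x - x to the constant 0. A rough integer model derived from the two SLLconst rules (the function names mirror the ops and are not compiler API):

package main

import "fmt"

// From (SUB x (SLLconst [c] y)) -> (SUBshiftLL x y [c]):
func subShiftLL(x, y int32, c uint) int32 { return x - y<<c }

// From (SUB (SLLconst [c] y) x) -> (RSBshiftLL x y [c]):
func rsbShiftLL(x, y int32, c uint) int32 { return y<<c - x }

func main() {
	fmt.Println(subShiftLL(100, 3, 2)) // 88  = 100 - (3<<2)
	fmt.Println(rsbShiftLL(100, 3, 2)) // -88 = (3<<2) - 100
}
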
-func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq8 x y)
+       // match: (SUBS (MOVWconst [c]) x)
        // cond:
-       // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+       // result: (RSBSconst [c] x)
+       for {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (NeqB x y)
+       // match: (SUBS x (MOVWconst [c]))
        // cond:
-       // result: (XOR x y)
+       // result: (SUBSconst [c] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMXOR)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (NeqPtr x y)
+       // match: (SUBS x (SLLconst [c] y))
        // cond:
-       // result: (NotEqual (CMP x y))
+       // result: (SUBSshiftLL x y [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMNotEqual)
-               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMSUBSshiftLL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (NilCheck ptr mem)
+       // match: (SUBS (SLLconst [c] y) x)
        // cond:
-       // result: (LoweredNilCheck ptr mem)
+       // result: (RSBSshiftLL x y [c])
        for {
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARMLoweredNilCheck)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMRSBSshiftLL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpNot(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Not x)
+       // match: (SUBS x (SRLconst [c] y))
        // cond:
-       // result: (XORconst [1] x)
+       // result: (SUBSshiftRL x y [c])
        for {
                x := v.Args[0]
-               v.reset(OpARMXORconst)
-               v.AuxInt = 1
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMSUBSshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (NotEqual (FlagEQ))
+       // match: (SUBS (SRLconst [c] y) x)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (RSBSshiftRL x y [c])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagEQ {
+               if v_0.Op != OpARMSRLconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMRSBSshiftRL)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (NotEqual (FlagLT_ULT))
+       // match: (SUBS x (SRAconst [c] y))
        // cond:
-       // result: (MOVWconst [1])
+       // result: (SUBSshiftRA x y [c])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_ULT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_1.AuxInt
+               y := v_1.Args[0]
+               v.reset(OpARMSUBSshiftRA)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (NotEqual (FlagLT_UGT))
+       // match: (SUBS (SRAconst [c] y) x)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (RSBSshiftRA x y [c])
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagLT_UGT {
+               if v_0.Op != OpARMSRAconst {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               c := v_0.AuxInt
+               y := v_0.Args[0]
+               x := v.Args[1]
+               v.reset(OpARMRSBSshiftRA)
+               v.AuxInt = c
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (NotEqual (FlagGT_ULT))
+       // match: (SUBS x (SLL y z))
        // cond:
-       // result: (MOVWconst [1])
+       // result: (SUBSshiftLLreg x y z)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_ULT {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMSUBSshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (NotEqual (FlagGT_UGT))
+       // match: (SUBS (SLL y z) x)
        // cond:
-       // result: (MOVWconst [1])
+       // result: (RSBSshiftLLreg x y z)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMFlagGT_UGT {
+               if v_0.Op != OpARMSLL {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 1
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMRSBSshiftLLreg)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (NotEqual (InvertFlags x))
+       // match: (SUBS x (SRL y z))
        // cond:
-       // result: (NotEqual x)
+       // result: (SUBSshiftRLreg x y z)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMInvertFlags {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRL {
                        break
                }
-               x := v_0.Args[0]
-               v.reset(OpARMNotEqual)
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMSUBSshiftRLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (OR (MOVWconst [c]) x)
+       // match: (SUBS (SRL y z) x)
        // cond:
-       // result: (ORconst [c] x)
+       // result: (RSBSshiftRLreg x y z)
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSRL {
                        break
                }
-               c := v_0.AuxInt
+               y := v_0.Args[0]
+               z := v_0.Args[1]
                x := v.Args[1]
-               v.reset(OpARMORconst)
-               v.AuxInt = c
+               v.reset(OpARMRSBSshiftRLreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (OR x (MOVWconst [c]))
+       // match: (SUBS x (SRA y z))
        // cond:
-       // result: (ORconst [c] x)
+       // result: (SUBSshiftRAreg x y z)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpARMSRA {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpARMORconst)
-               v.AuxInt = c
+               y := v_1.Args[0]
+               z := v_1.Args[1]
+               v.reset(OpARMSUBSshiftRAreg)
                v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(z)
                return true
        }
-       // match: (OR x (SLLconst [c] y))
+       // match: (SUBS (SRA y z) x)
        // cond:
-       // result: (ORshiftLL x y [c])
+       // result: (RSBSshiftRAreg x y z)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSLLconst {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMSRA {
                        break
                }
-               c := v_1.AuxInt
-               y := v_1.Args[0]
-               v.reset(OpARMORshiftLL)
+               y := v_0.Args[0]
+               z := v_0.Args[1]
+               x := v.Args[1]
+               v.reset(OpARMRSBSshiftRAreg)
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
+               v.AddArg(z)
                return true
        }
-       // match: (OR (SLLconst [c] y) x)
+       return false
+}
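
SUBS is the flag-setting form of SUB (ARM's S suffix), so its rules mirror SUB's one-for-one but target the S-suffixed ops (RSBSconst, SUBSshiftLL, and so on) to keep the flag result. A loose plain-Go analogue of subtract-and-set-flags, showing only the N and Z flags with carry and overflow omitted:

package main

import "fmt"

// subs models SUBS's two outputs: the difference, plus the negative and
// zero condition flags that the real instruction would record.
func subs(x, y int32) (diff int32, n, z bool) {
	diff = x - y
	return diff, diff < 0, diff == 0
}

func main() {
	fmt.Println(subs(7, 7)) // 0 false true
	fmt.Println(subs(3, 9)) // -6 true false
}
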
+func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBSshiftLL (MOVWconst [c]) x [d])
        // cond:
-       // result: (ORshiftLL x y [c])
+       // result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSLLconst {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               y := v_0.Args[0]
                x := v.Args[1]
-               v.reset(OpARMORshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARMRSBSconst)
                v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (OR x (SRLconst [c] y))
+       // match: (SUBSshiftLL x (MOVWconst [c]) [d])
        // cond:
-       // result: (ORshiftRL x y [c])
+       // result: (SUBSconst x [int64(uint32(c)<<uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSRLconst {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               y := v_1.Args[0]
-               v.reset(OpARMORshiftRL)
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = int64(uint32(c) << uint64(d))
                v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       // match: (OR (SRLconst [c] y) x)
+       return false
+}
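
When a result pattern nests one op inside another, as in (RSBSconst [c] (SLLconst <x.Type> x [d])) above, resetting v is not enough: the generated code materializes the inner op as a fresh value in the same block via b.NewValue0 (carrying v's line number and the <x.Type> annotation), fills in its AuxInt and arguments, and wires it in with v.AddArg(v0). A compressed sketch of that wiring with stand-in types, not the compiler's API:

package main

import "fmt"

type Op string

type Value struct {
	Op     Op
	AuxInt int64
	Args   []*Value
}

type Block struct{ Values []*Value }

// newValue0 stands in for b.NewValue0: allocate a fresh value in the block.
func (b *Block) newValue0(op Op) *Value {
	v := &Value{Op: op}
	b.Values = append(b.Values, v)
	return v
}

func main() {
	b := &Block{}
	x := b.newValue0("Arg")

	// Emit (RSBSconst [c] (SLLconst x [d])) for c=5, d=2:
	v := b.newValue0("RSBSconst")
	v.AuxInt = 5
	v0 := b.newValue0("SLLconst") // the nested op becomes its own value...
	v0.AuxInt = 2
	v0.Args = append(v0.Args, x)
	v.Args = append(v.Args, v0) // ...and is wired in as v's argument

	fmt.Println(v.Op, v.AuxInt, v.Args[0].Op, v.Args[0].AuxInt) // RSBSconst 5 SLLconst 2
}
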
+func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBSshiftLLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (ORshiftRL x y [c])
+       // result: (RSBSconst [c] (SLL <x.Type> x y))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSRLconst {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               y := v_0.Args[0]
                x := v.Args[1]
-               v.reset(OpARMORshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
+               y := v.Args[2]
+               v.reset(OpARMRSBSconst)
                v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (OR x (SRAconst [c] y))
+       // match: (SUBSshiftLLreg x y (MOVWconst [c]))
        // cond:
-       // result: (ORshiftRA x y [c])
+       // result: (SUBSshiftLL x y [c])
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRAconst {
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               c := v_1.AuxInt
-               y := v_1.Args[0]
-               v.reset(OpARMORshiftRA)
+               c := v_2.AuxInt
+               v.reset(OpARMSUBSshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       // match: (OR (SRAconst [c] y) x)
+       return false
+}
+func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBSshiftRA (MOVWconst [c]) x [d])
        // cond:
-       // result: (ORshiftRA x y [c])
+       // result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSRAconst {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
-               y := v_0.Args[0]
                x := v.Args[1]
-               v.reset(OpARMORshiftRA)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARMRSBSconst)
                v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (OR x (SLL y z))
+       // match: (SUBSshiftRA x (MOVWconst [c]) [d])
        // cond:
-       // result: (ORshiftLLreg x y z)
+       // result: (SUBSconst x [int64(int32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSLL {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               v.reset(OpARMORshiftLLreg)
+               c := v_1.AuxInt
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = int64(int32(c) >> uint64(d))
                v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
                return true
        }
-       // match: (OR (SLL y z) x)
+       return false
+}
+func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBSshiftRAreg (MOVWconst [c]) x y)
        // cond:
-       // result: (ORshiftLLreg x y z)
+       // result: (RSBSconst [c] (SRA <x.Type> x y))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSLL {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
+               c := v_0.AuxInt
                x := v.Args[1]
-               v.reset(OpARMORshiftLLreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               y := v.Args[2]
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (OR x (SRL y z))
+       // match: (SUBSshiftRAreg x y (MOVWconst [c]))
        // cond:
-       // result: (ORshiftRLreg x y z)
+       // result: (SUBSshiftRA x y [c])
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRL {
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               v.reset(OpARMORshiftRLreg)
+               c := v_2.AuxInt
+               v.reset(OpARMSUBSshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AddArg(z)
                return true
        }
-       // match: (OR (SRL y z) x)
+       return false
+}
+func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBSshiftRL (MOVWconst [c]) x [d])
        // cond:
-       // result: (ORshiftRLreg x y z)
+       // result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSRL {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
+               c := v_0.AuxInt
                x := v.Args[1]
-               v.reset(OpARMORshiftRLreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               v0.AuxInt = d
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (OR x (SRA y z))
+       // match: (SUBSshiftRL x (MOVWconst [c]) [d])
        // cond:
-       // result: (ORshiftRAreg x y z)
+       // result: (SUBSconst x [int64(uint32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSRA {
+               if v_1.Op != OpARMMOVWconst {
                        break
                }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               v.reset(OpARMORshiftRAreg)
+               c := v_1.AuxInt
+               v.reset(OpARMSUBSconst)
+               v.AuxInt = int64(uint32(c) >> uint64(d))
                v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
                return true
        }
-       // match: (OR (SRA y z) x)
+       return false
+}
+func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SUBSshiftRLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (ORshiftRAreg x y z)
+       // result: (RSBSconst [c] (SRL <x.Type> x y))
        for {
                v_0 := v.Args[0]
-               if v_0.Op != OpARMSRA {
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
+               c := v_0.AuxInt
                x := v.Args[1]
-               v.reset(OpARMORshiftRAreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               y := v.Args[2]
+               v.reset(OpARMRSBSconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (OR x x)
+       // match: (SUBSshiftRLreg x y (MOVWconst [c]))
        // cond:
-       // result: x
+       // result: (SUBSshiftRL x y [c])
        for {
                x := v.Args[0]
-               if x != v.Args[1] {
+               y := v.Args[1]
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = x.Type
+               c := v_2.AuxInt
+               v.reset(OpARMSUBSshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ORconst [0] x)
+       // match: (SUBconst [0] x)
        // cond:
        // result: x
        for {
@@ -11377,118 +11659,137 @@ func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool {
                v.AddArg(x)
                return true
        }
-       // match: (ORconst [c] _)
-       // cond: int32(c)==-1
-       // result: (MOVWconst [-1])
+       // match: (SUBconst [c] (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [int64(int32(d-c))])
        for {
                c := v.AuxInt
-               if !(int32(c) == -1) {
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
                        break
                }
+               d := v_0.AuxInt
                v.reset(OpARMMOVWconst)
-               v.AuxInt = -1
+               v.AuxInt = int64(int32(d - c))
                return true
        }
-       // match: (ORconst [c] (MOVWconst [d]))
+       // match: (SUBconst [c] (SUBconst [d] x))
        // cond:
-       // result: (MOVWconst [c|d])
+       // result: (ADDconst [int64(int32(-c-d))] x)
        for {
                c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v_0.Op != OpARMSUBconst {
                        break
                }
                d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = c | d
+               x := v_0.Args[0]
+               v.reset(OpARMADDconst)
+               v.AuxInt = int64(int32(-c - d))
+               v.AddArg(x)
+               return true
+       }
+       // match: (SUBconst [c] (ADDconst [d] x))
+       // cond:
+       // result: (ADDconst [int64(int32(-c+d))] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMADDconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMADDconst)
+               v.AuxInt = int64(int32(-c + d))
+               v.AddArg(x)
                return true
        }
-       // match: (ORconst [c] (ORconst [d] x))
+       // match: (SUBconst [c] (RSBconst [d] x))
        // cond:
-       // result: (ORconst [c|d] x)
+       // result: (RSBconst [int64(int32(-c+d))] x)
        for {
                c := v.AuxInt
                v_0 := v.Args[0]
-               if v_0.Op != OpARMORconst {
+               if v_0.Op != OpARMRSBconst {
                        break
                }
                d := v_0.AuxInt
                x := v_0.Args[0]
-               v.reset(OpARMORconst)
-               v.AuxInt = c | d
+               v.reset(OpARMRSBconst)
+               v.AuxInt = int64(int32(-c + d))
                v.AddArg(x)
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ORshiftLL (MOVWconst [c]) x [d])
+       // match: (SUBshiftLL (MOVWconst [c]) x [d])
        // cond:
-       // result: (ORconst [c] (SLLconst <x.Type> x [d]))
+       // result: (RSBconst [c] (SLLconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMORconst)
+               v.reset(OpARMRSBconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
-       // match: (ORshiftLL x (MOVWconst [c]) [d])
+       // match: (SUBshiftLL x (MOVWconst [c]) [d])
        // cond:
-       // result: (ORconst x [int64(uint32(c)<<uint64(d))])
+       // result: (SUBconst x [int64(uint32(c)<<uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMORconst)
-               v.AddArg(x)
+               v.reset(OpARMSUBconst)
                v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(x)
                return true
        }
-       // match: (ORshiftLL x y:(SLLconst x [c]) [d])
+       // match: (SUBshiftLL x (SLLconst x [c]) [d])
        // cond: c==d
-       // result: y
+       // result: (MOVWconst [0])
        for {
+               d := v.AuxInt
                x := v.Args[0]
-               y := v.Args[1]
-               if y.Op != OpARMSLLconst {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
                        break
                }
-               if x != y.Args[0] {
+               c := v_1.AuxInt
+               if x != v_1.Args[0] {
                        break
                }
-               c := y.AuxInt
-               d := v.AuxInt
                if !(c == d) {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = y.Type
-               v.AddArg(y)
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ORshiftLLreg (MOVWconst [c]) x y)
+       // match: (SUBshiftLLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (ORconst [c] (SLL <x.Type> x y))
+       // result: (RSBconst [c] (SLL <x.Type> x y))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -11497,7 +11798,7 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
                c := v_0.AuxInt
                x := v.Args[1]
                y := v.Args[2]
-               v.reset(OpARMORconst)
+               v.reset(OpARMRSBconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
                v0.AddArg(x)
@@ -11505,9 +11806,9 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
                v.AddArg(v0)
                return true
        }
-       // match: (ORshiftLLreg x y (MOVWconst [c]))
+       // match: (SUBshiftLLreg x y (MOVWconst [c]))
        // cond:
-       // result: (ORshiftLL x y [c])
+       // result: (SUBshiftLL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
@@ -11516,82 +11817,81 @@ func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
                        break
                }
                c := v_2.AuxInt
-               v.reset(OpARMORshiftLL)
+               v.reset(OpARMSUBshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ORshiftRA (MOVWconst [c]) x [d])
+       // match: (SUBshiftRA (MOVWconst [c]) x [d])
        // cond:
-       // result: (ORconst [c] (SRAconst <x.Type> x [d]))
+       // result: (RSBconst [c] (SRAconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMORconst)
+               v.reset(OpARMRSBconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
-       // match: (ORshiftRA x (MOVWconst [c]) [d])
+       // match: (SUBshiftRA x (MOVWconst [c]) [d])
        // cond:
-       // result: (ORconst x [int64(int32(c)>>uint64(d))])
+       // result: (SUBconst x [int64(int32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMORconst)
-               v.AddArg(x)
+               v.reset(OpARMSUBconst)
                v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(x)
                return true
        }
-       // match: (ORshiftRA x y:(SRAconst x [c]) [d])
+       // match: (SUBshiftRA x (SRAconst x [c]) [d])
        // cond: c==d
-       // result: y
+       // result: (MOVWconst [0])
        for {
+               d := v.AuxInt
                x := v.Args[0]
-               y := v.Args[1]
-               if y.Op != OpARMSRAconst {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
                        break
                }
-               if x != y.Args[0] {
+               c := v_1.AuxInt
+               if x != v_1.Args[0] {
                        break
                }
-               c := y.AuxInt
-               d := v.AuxInt
                if !(c == d) {
                        break
                }
-               v.reset(OpCopy)
-               v.Type = y.Type
-               v.AddArg(y)
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftRAreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ORshiftRAreg (MOVWconst [c]) x y)
+       // match: (SUBshiftRAreg (MOVWconst [c]) x y)
        // cond:
-       // result: (ORconst [c] (SRA <x.Type> x y))
+       // result: (RSBconst [c] (SRA <x.Type> x y))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -11600,7 +11900,7 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
                c := v_0.AuxInt
                x := v.Args[1]
                y := v.Args[2]
-               v.reset(OpARMORconst)
+               v.reset(OpARMRSBconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
                v0.AddArg(x)
@@ -11608,9 +11908,9 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
                v.AddArg(v0)
                return true
        }
-       // match: (ORshiftRAreg x y (MOVWconst [c]))
+       // match: (SUBshiftRAreg x y (MOVWconst [c]))
        // cond:
-       // result: (ORshiftRA x y [c])
+       // result: (SUBshiftRA x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
@@ -11619,212 +11919,122 @@ func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
                        break
                }
                c := v_2.AuxInt
-               v.reset(OpARMORshiftRA)
+               v.reset(OpARMSUBshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ORshiftRL (MOVWconst [c]) x [d])
+       // match: (SUBshiftRL (MOVWconst [c]) x [d])
        // cond:
-       // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+       // result: (RSBconst [c] (SRLconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMORconst)
+               v.reset(OpARMRSBconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
-       // match: (ORshiftRL x (MOVWconst [c]) [d])
+       // match: (SUBshiftRL x (MOVWconst [c]) [d])
        // cond:
-       // result: (ORconst x [int64(uint32(c)>>uint64(d))])
+       // result: (SUBconst x [int64(uint32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMORconst)
-               v.AddArg(x)
+               v.reset(OpARMSUBconst)
                v.AuxInt = int64(uint32(c) >> uint64(d))
-               return true
-       }
-       // match: (ORshiftRL x y:(SRLconst x [c]) [d])
-       // cond: c==d
-       // result: y
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               if y.Op != OpARMSRLconst {
-                       break
-               }
-               if x != y.Args[0] {
-                       break
-               }
-               c := y.AuxInt
-               d := v.AuxInt
-               if !(c == d) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = y.Type
-               v.AddArg(y)
-               return true
-       }
-       return false
-}
-func rewriteValueARM_OpARMORshiftRLreg(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ORshiftRLreg (MOVWconst [c]) x y)
-       // cond:
-       // result: (ORconst [c] (SRL <x.Type> x y))
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMORconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               return true
-       }
-       // match: (ORshiftRLreg x y (MOVWconst [c]))
-       // cond:
-       // result: (ORshiftRL x y [c])
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               v.reset(OpARMORshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               return true
-       }
-       return false
-}
-func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (OffPtr [off] ptr:(SP))
-       // cond:
-       // result: (MOVWaddr [off] ptr)
-       for {
-               off := v.AuxInt
-               ptr := v.Args[0]
-               if ptr.Op != OpSP {
-                       break
-               }
-               v.reset(OpARMMOVWaddr)
-               v.AuxInt = off
-               v.AddArg(ptr)
-               return true
-       }
-       // match: (OffPtr [off] ptr)
-       // cond:
-       // result: (ADDconst [off] ptr)
-       for {
-               off := v.AuxInt
-               ptr := v.Args[0]
-               v.reset(OpARMADDconst)
-               v.AuxInt = off
-               v.AddArg(ptr)
-               return true
-       }
-}
-func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Or16 x y)
-       // cond:
-       // result: (OR x y)
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMOR)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Or32 x y)
-       // cond:
-       // result: (OR x y)
+       // match: (SUBshiftRL x (SRLconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
        for {
+               d := v.AuxInt
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMOR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if x != v_1.Args[0] {
+                       break
+               }
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
-func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMSUBshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or8 x y)
+       // match: (SUBshiftRLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (OR x y)
+       // result: (RSBconst [c] (SRL <x.Type> x y))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMOR)
-               v.AddArg(x)
-               v.AddArg(y)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               y := v.Args[2]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-}
-func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (OrB x y)
+       // match: (SUBshiftRLreg x y (MOVWconst [c]))
        // cond:
-       // result: (OR x y)
+       // result: (SUBshiftRL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMOR)
+               v_2 := v.Args[2]
+               if v_2.Op != OpARMMOVWconst {
+                       break
+               }
+               c := v_2.AuxInt
+               v.reset(OpARMSUBshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
                return true
        }
+       return false
 }
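
The second rule in each of the reg-form functions above folds a constant shift amount into the immediate form, e.g. (SUBshiftRLreg x y (MOVWconst [c])) -> (SUBshiftRL x y [c]). Reading SUBshiftRL x y [c] as x - (y >> c), which is consistent with the constant folds above, the rewrite is a pure arithmetic identity; a minimal sketch:

package main

import "fmt"

func main() {
	// When the shift amount is a known constant, the register-shift form
	// and the immediate form compute the same x - (y >> c).
	x, y := uint32(1000), uint32(640)
	c := uint32(4) // the (MOVWconst [c]) shift amount

	regForm := x - (y >> c) // SUBshiftRLreg: amount comes from a register
	immForm := x - (y >> 4) // SUBshiftRL [4]: amount is an immediate

	fmt.Println(regForm, immForm, regForm == immForm) // 960 960 true
}
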
-func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSB (MOVWconst [c]) x)
+       // match: (XOR (MOVWconst [c]) x)
        // cond:
-       // result: (SUBconst [c] x)
+       // result: (XORconst [c] x)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -11832,14 +12042,14 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               v.reset(OpARMSUBconst)
+               v.reset(OpARMXORconst)
                v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (RSB x (MOVWconst [c]))
+       // match: (XOR x (MOVWconst [c]))
        // cond:
-       // result: (RSBconst [c] x)
+       // result: (XORconst [c] x)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -11847,14 +12057,14 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                        break
                }
                c := v_1.AuxInt
-               v.reset(OpARMRSBconst)
+               v.reset(OpARMXORconst)
                v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (RSB x (SLLconst [c] y))
+       // match: (XOR x (SLLconst [c] y))
        // cond:
-       // result: (RSBshiftLL x y [c])
+       // result: (XORshiftLL x y [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -11863,15 +12073,15 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                }
                c := v_1.AuxInt
                y := v_1.Args[0]
-               v.reset(OpARMRSBshiftLL)
+               v.reset(OpARMXORshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       // match: (RSB (SLLconst [c] y) x)
+       // match: (XOR (SLLconst [c] y) x)
        // cond:
-       // result: (SUBshiftLL x y [c])
+       // result: (XORshiftLL x y [c])
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMSLLconst {
@@ -11880,15 +12090,15 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                c := v_0.AuxInt
                y := v_0.Args[0]
                x := v.Args[1]
-               v.reset(OpARMSUBshiftLL)
+               v.reset(OpARMXORshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       // match: (RSB x (SRLconst [c] y))
+       // match: (XOR x (SRLconst [c] y))
        // cond:
-       // result: (RSBshiftRL x y [c])
+       // result: (XORshiftRL x y [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -11897,15 +12107,15 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                }
                c := v_1.AuxInt
                y := v_1.Args[0]
-               v.reset(OpARMRSBshiftRL)
+               v.reset(OpARMXORshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       // match: (RSB (SRLconst [c] y) x)
+       // match: (XOR (SRLconst [c] y) x)
        // cond:
-       // result: (SUBshiftRL x y [c])
+       // result: (XORshiftRL x y [c])
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMSRLconst {
@@ -11914,15 +12124,15 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                c := v_0.AuxInt
                y := v_0.Args[0]
                x := v.Args[1]
-               v.reset(OpARMSUBshiftRL)
+               v.reset(OpARMXORshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       // match: (RSB x (SRAconst [c] y))
+       // match: (XOR x (SRAconst [c] y))
        // cond:
-       // result: (RSBshiftRA x y [c])
+       // result: (XORshiftRA x y [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -11931,15 +12141,15 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                }
                c := v_1.AuxInt
                y := v_1.Args[0]
-               v.reset(OpARMRSBshiftRA)
+               v.reset(OpARMXORshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       // match: (RSB (SRAconst [c] y) x)
+       // match: (XOR (SRAconst [c] y) x)
        // cond:
-       // result: (SUBshiftRA x y [c])
+       // result: (XORshiftRA x y [c])
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMSRAconst {
@@ -11948,15 +12158,15 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                c := v_0.AuxInt
                y := v_0.Args[0]
                x := v.Args[1]
-               v.reset(OpARMSUBshiftRA)
+               v.reset(OpARMXORshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       // match: (RSB x (SLL y z))
+       // match: (XOR x (SLL y z))
        // cond:
-       // result: (RSBshiftLLreg x y z)
+       // result: (XORshiftLLreg x y z)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -11965,15 +12175,15 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                }
                y := v_1.Args[0]
                z := v_1.Args[1]
-               v.reset(OpARMRSBshiftLLreg)
+               v.reset(OpARMXORshiftLLreg)
                v.AddArg(x)
                v.AddArg(y)
                v.AddArg(z)
                return true
        }
-       // match: (RSB (SLL y z) x)
+       // match: (XOR (SLL y z) x)
        // cond:
-       // result: (SUBshiftLLreg x y z)
+       // result: (XORshiftLLreg x y z)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMSLL {
@@ -11982,15 +12192,15 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                y := v_0.Args[0]
                z := v_0.Args[1]
                x := v.Args[1]
-               v.reset(OpARMSUBshiftLLreg)
+               v.reset(OpARMXORshiftLLreg)
                v.AddArg(x)
                v.AddArg(y)
                v.AddArg(z)
                return true
        }
-       // match: (RSB x (SRL y z))
+       // match: (XOR x (SRL y z))
        // cond:
-       // result: (RSBshiftRLreg x y z)
+       // result: (XORshiftRLreg x y z)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -11999,15 +12209,15 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                }
                y := v_1.Args[0]
                z := v_1.Args[1]
-               v.reset(OpARMRSBshiftRLreg)
+               v.reset(OpARMXORshiftRLreg)
                v.AddArg(x)
                v.AddArg(y)
                v.AddArg(z)
                return true
        }
-       // match: (RSB (SRL y z) x)
+       // match: (XOR (SRL y z) x)
        // cond:
-       // result: (SUBshiftRLreg x y z)
+       // result: (XORshiftRLreg x y z)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMSRL {
@@ -12016,15 +12226,15 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                y := v_0.Args[0]
                z := v_0.Args[1]
                x := v.Args[1]
-               v.reset(OpARMSUBshiftRLreg)
+               v.reset(OpARMXORshiftRLreg)
                v.AddArg(x)
                v.AddArg(y)
                v.AddArg(z)
                return true
        }
-       // match: (RSB x (SRA y z))
+       // match: (XOR x (SRA y z))
        // cond:
-       // result: (RSBshiftRAreg x y z)
+       // result: (XORshiftRAreg x y z)
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
@@ -12033,15 +12243,15 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                }
                y := v_1.Args[0]
                z := v_1.Args[1]
-               v.reset(OpARMRSBshiftRAreg)
+               v.reset(OpARMXORshiftRAreg)
                v.AddArg(x)
                v.AddArg(y)
                v.AddArg(z)
                return true
        }
-       // match: (RSB (SRA y z) x)
+       // match: (XOR (SRA y z) x)
        // cond:
-       // result: (SUBshiftRAreg x y z)
+       // result: (XORshiftRAreg x y z)
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMSRA {
@@ -12050,13 +12260,13 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
                y := v_0.Args[0]
                z := v_0.Args[1]
                x := v.Args[1]
-               v.reset(OpARMSUBshiftRAreg)
+               v.reset(OpARMXORshiftRAreg)
                v.AddArg(x)
                v.AddArg(y)
                v.AddArg(z)
                return true
        }
-       // match: (RSB x x)
+       // match: (XOR x x)
        // cond:
        // result: (MOVWconst [0])
        for {
@@ -12070,52 +12280,121 @@ func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
        }
        return false
 }
-func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORconst(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBSshiftLL (MOVWconst [c]) x [d])
+       // match: (XORconst [0] x)
        // cond:
-       // result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
+       // result: x
+       for {
+               if v.AuxInt != 0 {
+                       break
+               }
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
+               return true
+       }
+       // match: (XORconst [c] (MOVWconst [d]))
+       // cond:
+       // result: (MOVWconst [c^d])
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMMOVWconst {
+                       break
+               }
+               d := v_0.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = c ^ d
+               return true
+       }
+       // match: (XORconst [c] (XORconst [d] x))
+       // cond:
+       // result: (XORconst [c^d] x)
+       for {
+               c := v.AuxInt
+               v_0 := v.Args[0]
+               if v_0.Op != OpARMXORconst {
+                       break
+               }
+               d := v_0.AuxInt
+               x := v_0.Args[0]
+               v.reset(OpARMXORconst)
+               v.AuxInt = c ^ d
+               v.AddArg(x)
+               return true
+       }
+       return false
+}
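
The XORconst rules above are the usual algebraic simplifications: x ^ 0 is the identity, a fully constant operand folds, and nested XORconst ops merge because ^ is associative and commutative. A quick check of the identities the rules rely on:

package main

import "fmt"

func main() {
	x := uint32(0xDEADBEEF)
	c, d := uint32(0x0F0F0F0F), uint32(0x12345678)

	fmt.Println(x^0 == x)           // (XORconst [0] x) -> x
	fmt.Println(c^(d^x) == (c^d)^x) // (XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
	fmt.Println(x^x == 0)           // (XOR x x) -> (MOVWconst [0]), from the XOR rules above
}
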
+func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (XORshiftLL (MOVWconst [c]) x [d])
+       // cond:
+       // result: (XORconst [c] (SLLconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMSUBSconst)
+               v.reset(OpARMXORconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
-       // match: (RSBSshiftLL x (MOVWconst [c]) [d])
+       // match: (XORshiftLL x (MOVWconst [c]) [d])
        // cond:
-       // result: (RSBSconst x [int64(uint32(c)<<uint64(d))])
+       // result: (XORconst x [int64(uint32(c)<<uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMRSBSconst)
-               v.AddArg(x)
+               v.reset(OpARMXORconst)
                v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(x)
+               return true
+       }
+       // match: (XORshiftLL x (SLLconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
+       for {
+               d := v.AuxInt
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSLLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if x != v_1.Args[0] {
+                       break
+               }
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
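
The folded immediate in (XORconst x [int64(uint32(c)<<uint64(d))]) is deliberately computed with 32-bit wraparound: bits shifted past bit 31 must drop off, exactly as they would in the 32-bit SLL being replaced, before the result is widened back into the int64 AuxInt field. A small demonstration of the difference:

package main

import "fmt"

func main() {
	c, d := int64(0x80000001), uint64(1)

	full := c << d                 // 64-bit shift keeps bit 32: 0x100000002
	arm32 := int64(uint32(c) << d) // 32-bit shift wraps: 0x2, matching ARM SLL

	fmt.Printf("%#x %#x\n", full, arm32) // 0x100000002 0x2
}
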
-func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftLLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBSshiftLLreg (MOVWconst [c]) x y)
+       // match: (XORshiftLLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (SUBSconst [c] (SLL <x.Type> x y))
+       // result: (XORconst [c] (SLL <x.Type> x y))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -12124,7 +12403,7 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
                c := v_0.AuxInt
                x := v.Args[1]
                y := v.Args[2]
-               v.reset(OpARMSUBSconst)
+               v.reset(OpARMXORconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
                v0.AddArg(x)
@@ -12132,9 +12411,9 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
                v.AddArg(v0)
                return true
        }
-       // match: (RSBSshiftLLreg x y (MOVWconst [c]))
+       // match: (XORshiftLLreg x y (MOVWconst [c]))
        // cond:
-       // result: (RSBSshiftLL x y [c])
+       // result: (XORshiftLL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
@@ -12143,60 +12422,81 @@ func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
                        break
                }
                c := v_2.AuxInt
-               v.reset(OpARMRSBSshiftLL)
+               v.reset(OpARMXORshiftLL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBSshiftRA (MOVWconst [c]) x [d])
+       // match: (XORshiftRA (MOVWconst [c]) x [d])
        // cond:
-       // result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
+       // result: (XORconst [c] (SRAconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMSUBSconst)
+               v.reset(OpARMXORconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
-       // match: (RSBSshiftRA x (MOVWconst [c]) [d])
+       // match: (XORshiftRA x (MOVWconst [c]) [d])
        // cond:
-       // result: (RSBSconst x [int64(int32(c)>>uint64(d))])
+       // result: (XORconst x [int64(int32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMRSBSconst)
-               v.AddArg(x)
+               v.reset(OpARMXORconst)
                v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(x)
+               return true
+       }
+       // match: (XORshiftRA x (SRAconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
+       for {
+               d := v.AuxInt
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRAconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if x != v_1.Args[0] {
+                       break
+               }
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
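
Note the int32 vs uint32 conversions in these constant folds: the RA (arithmetic-shift) rules use int64(int32(c) >> uint64(d)) so the shift propagates the sign bit, while the RL (logical-shift) folds use uint32 so zeros are shifted in. The two differ exactly when the constant has its top bit set:

package main

import "fmt"

func main() {
	c, d := int64(-8), uint64(1) // -8 is 0xFFFFFFF8 as a 32-bit word

	fmt.Println(int64(int32(c) >> d))  // -4: arithmetic shift keeps the sign bit
	fmt.Println(int64(uint32(c) >> d)) // 2147483644 (0x7FFFFFFC): logical shift inserts zero
}
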
-func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRAreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBSshiftRAreg (MOVWconst [c]) x y)
+       // match: (XORshiftRAreg (MOVWconst [c]) x y)
        // cond:
-       // result: (SUBSconst [c] (SRA <x.Type> x y))
+       // result: (XORconst [c] (SRA <x.Type> x y))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -12205,7 +12505,7 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
                c := v_0.AuxInt
                x := v.Args[1]
                y := v.Args[2]
-               v.reset(OpARMSUBSconst)
+               v.reset(OpARMXORconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
                v0.AddArg(x)
@@ -12213,9 +12513,9 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
                v.AddArg(v0)
                return true
        }
-       // match: (RSBSshiftRAreg x y (MOVWconst [c]))
+       // match: (XORshiftRAreg x y (MOVWconst [c]))
        // cond:
-       // result: (RSBSshiftRA x y [c])
+       // result: (XORshiftRA x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
@@ -12224,60 +12524,81 @@ func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
                        break
                }
                c := v_2.AuxInt
-               v.reset(OpARMRSBSshiftRA)
+               v.reset(OpARMXORshiftRA)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBSshiftRL (MOVWconst [c]) x [d])
+       // match: (XORshiftRL (MOVWconst [c]) x [d])
        // cond:
-       // result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
+       // result: (XORconst [c] (SRLconst <x.Type> x [d]))
        for {
+               d := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
                        break
                }
                c := v_0.AuxInt
                x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMSUBSconst)
+               v.reset(OpARMXORconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-               v0.AddArg(x)
                v0.AuxInt = d
+               v0.AddArg(x)
                v.AddArg(v0)
                return true
        }
-       // match: (RSBSshiftRL x (MOVWconst [c]) [d])
+       // match: (XORshiftRL x (MOVWconst [c]) [d])
        // cond:
-       // result: (RSBSconst x [int64(uint32(c)>>uint64(d))])
+       // result: (XORconst x [int64(uint32(c)>>uint64(d))])
        for {
+               d := v.AuxInt
                x := v.Args[0]
                v_1 := v.Args[1]
                if v_1.Op != OpARMMOVWconst {
                        break
                }
                c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMRSBSconst)
-               v.AddArg(x)
+               v.reset(OpARMXORconst)
                v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(x)
+               return true
+       }
+       // match: (XORshiftRL x (SRLconst x [c]) [d])
+       // cond: c==d
+       // result: (MOVWconst [0])
+       for {
+               d := v.AuxInt
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARMSRLconst {
+                       break
+               }
+               c := v_1.AuxInt
+               if x != v_1.Args[0] {
+                       break
+               }
+               if !(c == d) {
+                       break
+               }
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpARMXORshiftRLreg(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBSshiftRLreg (MOVWconst [c]) x y)
+       // match: (XORshiftRLreg (MOVWconst [c]) x y)
        // cond:
-       // result: (SUBSconst [c] (SRL <x.Type> x y))
+       // result: (XORconst [c] (SRL <x.Type> x y))
        for {
                v_0 := v.Args[0]
                if v_0.Op != OpARMMOVWconst {
@@ -12286,7 +12607,7 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
                c := v_0.AuxInt
                x := v.Args[1]
                y := v.Args[2]
-               v.reset(OpARMSUBSconst)
+               v.reset(OpARMXORconst)
                v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
                v0.AddArg(x)
@@ -12294,9 +12615,9 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
                v.AddArg(v0)
                return true
        }
-       // match: (RSBSshiftRLreg x y (MOVWconst [c]))
+       // match: (XORshiftRLreg x y (MOVWconst [c]))
        // cond:
-       // result: (RSBSshiftRL x y [c])
+       // result: (XORshiftRL x y [c])
        for {
                x := v.Args[0]
                y := v.Args[1]
@@ -12305,4182 +12626,3861 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
                        break
                }
                c := v_2.AuxInt
-               v.reset(OpARMRSBSshiftRL)
+               v.reset(OpARMXORshiftRL)
+               v.AuxInt = c
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
        return false
 }
-func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBconst [c] (MOVWconst [d]))
-       // cond:
-       // result: (MOVWconst [int64(int32(c-d))])
-       for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int32(c - d))
-               return true
-       }
-       // match: (RSBconst [c] (RSBconst [d] x))
+       // match: (Add16 x y)
        // cond:
-       // result: (ADDconst [int64(int32(c-d))] x)
+       // result: (ADD x y)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMRSBconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMADDconst)
-               v.AuxInt = int64(int32(c - d))
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMADD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (RSBconst [c] (ADDconst [d] x))
+}
+func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add32 x y)
        // cond:
-       // result: (RSBconst [int64(int32(c-d))] x)
+       // result: (ADD x y)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = int64(int32(c - d))
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMADD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (RSBconst [c] (SUBconst [d] x))
+}
+func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add32F x y)
        // cond:
-       // result: (RSBconst [int64(int32(c+d))] x)
+       // result: (ADDF x y)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSUBconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = int64(int32(c + d))
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMADDF)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
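
The RSBconst folds above store their results as int64(int32(c-d)) (and int64(int32(c+d))): AuxInt is a 64-bit field, but ARM word arithmetic wraps at 32 bits, so the fold truncates to int32 and sign-extends before storing. The conversion matters once the true sum or difference no longer fits in 32 bits:

package main

import "fmt"

func main() {
	c, d := int64(0x7FFFFFFF), int64(-1)

	raw := c - d                  // 2147483648: does not fit in 32 bits
	folded := int64(int32(c - d)) // -2147483648: wrapped, then sign-extended

	fmt.Println(raw, folded)
}
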
-func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBshiftLL (MOVWconst [c]) x [d])
+       // match: (Add32carry x y)
        // cond:
-       // result: (SUBconst [c] (SLLconst <x.Type> x [d]))
+       // result: (ADDS x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMSUBconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMADDS)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (RSBshiftLL x (MOVWconst [c]) [d])
+}
+func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add32withcarry x y c)
        // cond:
-       // result: (RSBconst x [int64(uint32(c)<<uint64(d))])
+       // result: (ADC x y c)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMRSBconst)
+               y := v.Args[1]
+               c := v.Args[2]
+               v.reset(OpARMADC)
                v.AddArg(x)
-               v.AuxInt = int64(uint32(c) << uint64(d))
+               v.AddArg(y)
+               v.AddArg(c)
                return true
        }
-       // match: (RSBshiftLL x (SLLconst x [c]) [d])
-       // cond: c==d
-       // result: (MOVWconst [0])
+}
+func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Add64F x y)
+       // cond:
+       // result: (ADDD x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSLLconst {
-                       break
-               }
-               if x != v_1.Args[0] {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               if !(c == d) {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v.Args[1]
+               v.reset(OpARMADDD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMRSBshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBshiftLLreg (MOVWconst [c]) x y)
+       // match: (Add8 x y)
        // cond:
-       // result: (SUBconst [c] (SLL <x.Type> x y))
+       // result: (ADD x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMSUBconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMADD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (RSBshiftLLreg x y (MOVWconst [c]))
+}
+func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (AddPtr x y)
        // cond:
-       // result: (RSBshiftLL x y [c])
+       // result: (ADD x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               v.reset(OpARMRSBshiftLL)
+               v.reset(OpARMADD)
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBshiftRA (MOVWconst [c]) x [d])
+       // match: (Addr {sym} base)
        // cond:
-       // result: (SUBconst [c] (SRAconst <x.Type> x [d]))
+       // result: (MOVWaddr {sym} base)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMSUBconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
+               sym := v.Aux
+               base := v.Args[0]
+               v.reset(OpARMMOVWaddr)
+               v.Aux = sym
+               v.AddArg(base)
                return true
        }
-       // match: (RSBshiftRA x (MOVWconst [c]) [d])
+}
+func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (And16 x y)
        // cond:
-       // result: (RSBconst x [int64(int32(c)>>uint64(d))])
+       // result: (AND x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMRSBconst)
+               y := v.Args[1]
+               v.reset(OpARMAND)
                v.AddArg(x)
-               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(y)
                return true
        }
-       // match: (RSBshiftRA x (SRAconst x [c]) [d])
-       // cond: c==d
-       // result: (MOVWconst [0])
+}
+func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (And32 x y)
+       // cond:
+       // result: (AND x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRAconst {
-                       break
-               }
-               if x != v_1.Args[0] {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               if !(c == d) {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v.Args[1]
+               v.reset(OpARMAND)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMRSBshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBshiftRAreg (MOVWconst [c]) x y)
+       // match: (And8 x y)
        // cond:
-       // result: (SUBconst [c] (SRA <x.Type> x y))
+       // result: (AND x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMSUBconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMAND)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (RSBshiftRAreg x y (MOVWconst [c]))
+}
+func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (AndB x y)
        // cond:
-       // result: (RSBshiftRA x y [c])
+       // result: (AND x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               v.reset(OpARMRSBshiftRA)
+               v.reset(OpARMAND)
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       return false
 }
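
The Add*/And* lowerings above are one-to-one: every integer width maps to the same 32-bit ARM instruction, since sub-word values are carried in full registers on this target, and only the floating-point widths pick distinct ops (ADDF vs ADDD). A hypothetical summary of the mapping these functions encode, with names taken from the rule comments; the real mapping lives in the rewriteValueARM_Op* functions, not in a table like this:

package main

import "fmt"

// lowerings summarizes the generic-op -> ARM-op rules generated above.
var lowerings = map[string]string{
	"Add8": "ADD", "Add16": "ADD", "Add32": "ADD", "AddPtr": "ADD",
	"Add32F": "ADDF", "Add64F": "ADDD",
	"Add32carry": "ADDS", "Add32withcarry": "ADC",
	"And8": "AND", "And16": "AND", "And32": "AND", "AndB": "AND",
}

func main() {
	fmt.Println(lowerings["Add16"], lowerings["Add64F"], lowerings["AndB"]) // ADD ADDD AND
}
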
-func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBshiftRL (MOVWconst [c]) x [d])
+       // match: (ClosureCall [argwid] entry closure mem)
        // cond:
-       // result: (SUBconst [c] (SRLconst <x.Type> x [d]))
+       // result: (CALLclosure [argwid] entry closure mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMSUBconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
+               argwid := v.AuxInt
+               entry := v.Args[0]
+               closure := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMCALLclosure)
+               v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(closure)
+               v.AddArg(mem)
                return true
        }
-       // match: (RSBshiftRL x (MOVWconst [c]) [d])
+}
+func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Com16 x)
        // cond:
-       // result: (RSBconst x [int64(uint32(c)>>uint64(d))])
+       // result: (MVN x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMRSBconst)
+               v.reset(OpARMMVN)
                v.AddArg(x)
-               v.AuxInt = int64(uint32(c) >> uint64(d))
-               return true
-       }
-       // match: (RSBshiftRL x (SRLconst x [c]) [d])
-       // cond: c==d
-       // result: (MOVWconst [0])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRLconst {
-                       break
-               }
-               if x != v_1.Args[0] {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               if !(c == d) {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMRSBshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSBshiftRLreg (MOVWconst [c]) x y)
+       // match: (Com32 x)
        // cond:
-       // result: (SUBconst [c] (SRL <x.Type> x y))
+       // result: (MVN x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMSUBconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.Args[0]
+               v.reset(OpARMMVN)
+               v.AddArg(x)
                return true
        }
-       // match: (RSBshiftRLreg x y (MOVWconst [c]))
+}
+func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Com8 x)
        // cond:
-       // result: (RSBshiftRL x y [c])
+       // result: (MVN x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               v.reset(OpARMRSBshiftRL)
+               v.reset(OpARMMVN)
                v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSCconst [c] (ADDconst [d] x) flags)
+       // match: (Const16 [val])
        // cond:
-       // result: (RSCconst [int64(int32(c-d))] x flags)
+       // result: (MOVWconst [val])
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               flags := v.Args[1]
-               v.reset(OpARMRSCconst)
-               v.AuxInt = int64(int32(c - d))
-               v.AddArg(x)
-               v.AddArg(flags)
+               val := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = val
                return true
        }
-       // match: (RSCconst [c] (SUBconst [d] x) flags)
+}
+func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Const32 [val])
        // cond:
-       // result: (RSCconst [int64(int32(c+d))] x flags)
+       // result: (MOVWconst [val])
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSUBconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               flags := v.Args[1]
-               v.reset(OpARMRSCconst)
-               v.AuxInt = int64(int32(c + d))
-               v.AddArg(x)
-               v.AddArg(flags)
+               val := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = val
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSCshiftLL (MOVWconst [c]) x [d] flags)
+       // match: (Const32F [val])
        // cond:
-       // result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+       // result: (MOVFconst [val])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMSBCconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
-               v.AddArg(flags)
+               val := v.AuxInt
+               v.reset(OpARMMOVFconst)
+               v.AuxInt = val
                return true
        }
-       // match: (RSCshiftLL x (MOVWconst [c]) [d] flags)
+}
+func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Const64F [val])
        // cond:
-       // result: (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
+       // result: (MOVDconst [val])
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMRSCconst)
-               v.AddArg(x)
-               v.AuxInt = int64(uint32(c) << uint64(d))
-               v.AddArg(flags)
+               val := v.AuxInt
+               v.reset(OpARMMOVDconst)
+               v.AuxInt = val
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMRSCshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSCshiftLLreg (MOVWconst [c]) x y flags)
+       // match: (Const8 [val])
        // cond:
-       // result: (SBCconst [c] (SLL <x.Type> x y) flags)
+       // result: (MOVWconst [val])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               flags := v.Args[3]
-               v.reset(OpARMSBCconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v.AddArg(flags)
+               val := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = val
                return true
        }
-       // match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
+}
+func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (ConstBool [b])
        // cond:
-       // result: (RSCshiftLL x y [c] flags)
+       // result: (MOVWconst [b])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               flags := v.Args[3]
-               v.reset(OpARMRSCshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
+               b := v.AuxInt
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = b
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSCshiftRA (MOVWconst [c]) x [d] flags)
+       // match: (ConstNil)
        // cond:
-       // result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+       // result: (MOVWconst [0])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMSBCconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
-               v.AddArg(flags)
+               v.reset(OpARMMOVWconst)
+               v.AuxInt = 0
                return true
        }
-       // match: (RSCshiftRA x (MOVWconst [c]) [d] flags)
+}
+func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Convert x mem)
        // cond:
-       // result: (RSCconst x [int64(int32(c)>>uint64(d))] flags)
+       // result: (MOVWconvert x mem)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMRSCconst)
+               mem := v.Args[1]
+               v.reset(OpARMMOVWconvert)
                v.AddArg(x)
-               v.AuxInt = int64(int32(c) >> uint64(d))
-               v.AddArg(flags)
+               v.AddArg(mem)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMRSCshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSCshiftRAreg (MOVWconst [c]) x y flags)
+       // match: (Cvt32Fto32 x)
        // cond:
-       // result: (SBCconst [c] (SRA <x.Type> x y) flags)
+       // result: (MOVFW x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               flags := v.Args[3]
-               v.reset(OpARMSBCconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v.AddArg(flags)
+               x := v.Args[0]
+               v.reset(OpARMMOVFW)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32Fto32U x)
+       // cond:
+       // result: (MOVFWU x)
+       for {
+               x := v.Args[0]
+               v.reset(OpARMMOVFWU)
+               v.AddArg(x)
                return true
        }
-       // match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
+}
+func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32Fto64F x)
        // cond:
-       // result: (RSCshiftRA x y [c] flags)
+       // result: (MOVFD x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               flags := v.Args[3]
-               v.reset(OpARMRSCshiftRA)
+               v.reset(OpARMMOVFD)
                v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSCshiftRL (MOVWconst [c]) x [d] flags)
+       // match: (Cvt32Uto32F x)
        // cond:
-       // result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+       // result: (MOVWUF x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMSBCconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
-               v.AddArg(flags)
+               x := v.Args[0]
+               v.reset(OpARMMOVWUF)
+               v.AddArg(x)
                return true
        }
-       // match: (RSCshiftRL x (MOVWconst [c]) [d] flags)
+}
+func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32Uto64F x)
        // cond:
-       // result: (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
+       // result: (MOVWUD x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMRSCconst)
+               v.reset(OpARMMOVWUD)
                v.AddArg(x)
-               v.AuxInt = int64(uint32(c) >> uint64(d))
-               v.AddArg(flags)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMRSCshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (RSCshiftRLreg (MOVWconst [c]) x y flags)
+       // match: (Cvt32to32F x)
        // cond:
-       // result: (SBCconst [c] (SRL <x.Type> x y) flags)
+       // result: (MOVWF x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               flags := v.Args[3]
-               v.reset(OpARMSBCconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v.AddArg(flags)
+               x := v.Args[0]
+               v.reset(OpARMMOVWF)
+               v.AddArg(x)
                return true
        }
-       // match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
+}
+func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32to64F x)
        // cond:
-       // result: (RSCshiftRL x y [c] flags)
+       // result: (MOVWD x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               flags := v.Args[3]
-               v.reset(OpARMRSCshiftRL)
+               v.reset(OpARMMOVWD)
                v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux16 x y)
+       // match: (Cvt64Fto32 x)
        // cond:
-       // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+       // result: (MOVDW x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMCMOVWHSconst)
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v3.AuxInt = 256
-               v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
-               v.AuxInt = 0
+               v.reset(OpARMMOVDW)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux32 x y)
+       // match: (Cvt64Fto32F x)
        // cond:
-       // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+       // result: (MOVDF x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMCMOVWHSconst)
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v2.AuxInt = 256
-               v2.AddArg(y)
-               v.AddArg(v2)
-               v.AuxInt = 0
+               v.reset(OpARMMOVDF)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
+func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux64 x (Const64 [c]))
-       // cond: uint64(c) < 16
-       // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
+       // match: (Cvt64Fto32U x)
+       // cond:
+       // result: (MOVDWU x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 16) {
-                       break
-               }
-               v.reset(OpARMSRLconst)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v0.AuxInt = 16
-               v.AddArg(v0)
-               v.AuxInt = c + 16
+               v.reset(OpARMMOVDWU)
+               v.AddArg(x)
                return true
        }
-       // match: (Rsh16Ux64 _ (Const64 [c]))
-       // cond: uint64(c) >= 16
-       // result: (Const16 [0])
+}
+func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (DeferCall [argwid] mem)
+       // cond:
+       // result: (CALLdefer [argwid] mem)
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
-                       break
-               }
-               v.reset(OpConst16)
-               v.AuxInt = 0
+               argwid := v.AuxInt
+               mem := v.Args[0]
+               v.reset(OpARMCALLdefer)
+               v.AuxInt = argwid
+               v.AddArg(mem)
                return true
        }
-       return false
 }
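
The removed Rsh16Ux64 matchers above illustrate the guarded-rule pattern that runs through this file: each rule re-checks its operand shape and its cond line, a failed check breaks out so control falls through to the next rule, and only when every rule misses does the function return false. A sketch of that control flow on the same pair of conditions, written as plain arithmetic rather than SSA (names are illustrative only):

    package main

    import "fmt"

    // shiftRightU16 models the two guarded rules for
    // (Rsh16Ux64 x (Const64 [c])): a shift count below 16 performs a
    // real shift, and any larger count folds to the constant 0.
    func shiftRightU16(x uint16, c int64) uint16 {
        // rule 1, cond: uint64(c) < 16
        for {
            if !(uint64(c) < 16) {
                break // condition failed; fall through to the next rule
            }
            return x >> uint(c)
        }
        // rule 2, cond: uint64(c) >= 16
        for {
            if !(uint64(c) >= 16) {
                break
            }
            return 0 // the (Const16 [0]) result
        }
        return 0 // unreachable: the two conditions cover every c
    }

    func main() {
        fmt.Println(shiftRightU16(0x8000, 3))  // 4096
        fmt.Println(shiftRightU16(0x8000, 40)) // 0
    }
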
-func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16Ux8  x y)
+       // match: (Div16 x y)
        // cond:
-       // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+       // result: (DIV (SignExt16to32 x) (SignExt16to32 y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRL)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v.reset(OpARMDIV)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
                v0.AddArg(x)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
                v1.AddArg(y)
                v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x16 x y)
+       // match: (Div16u x y)
        // cond:
-       // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+       // result: (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRAcond)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v.reset(OpARMDIVU)
+               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v0.AddArg(x)
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v1.AddArg(y)
                v.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v2.AuxInt = 256
-               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v3.AddArg(y)
-               v2.AddArg(v3)
-               v.AddArg(v2)
                return true
        }
 }
-func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Div32 x y)
+       // cond:
+       // result: (DIV x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMDIV)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x32 x y)
+       // match: (Div32F x y)
        // cond:
-       // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+       // result: (DIVF x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRAcond)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
+               v.reset(OpARMDIVF)
+               v.AddArg(x)
                v.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v1.AuxInt = 256
-               v1.AddArg(y)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x64 x (Const64 [c]))
-       // cond: uint64(c) < 16
-       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 16) {
-                       break
-               }
-               v.reset(OpARMSRAconst)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v0.AuxInt = 16
-               v.AddArg(v0)
-               v.AuxInt = c + 16
-               return true
-       }
-       // match: (Rsh16x64 x (Const64 [c]))
-       // cond: uint64(c) >= 16
-       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
+       // match: (Div32u x y)
+       // cond:
+       // result: (DIVU x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
-                       break
-               }
-               v.reset(OpARMSRAconst)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v0.AuxInt = 16
-               v.AddArg(v0)
-               v.AuxInt = 31
+               y := v.Args[1]
+               v.reset(OpARMDIVU)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh16x8  x y)
+       // match: (Div64F x y)
        // cond:
-       // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+       // result: (DIVD x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRA)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v.reset(OpARMDIVD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux16 x y)
+       // match: (Div8 x y)
        // cond:
-       // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+       // result: (DIV (SignExt8to32 x) (SignExt8to32 y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMCMOVWHSconst)
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v.reset(OpARMDIV)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
                v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v0.AddArg(v1)
                v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v2.AuxInt = 256
-               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v3.AddArg(y)
-               v2.AddArg(v3)
-               v.AddArg(v2)
-               v.AuxInt = 0
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
+func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux32 x y)
+       // match: (Div8u x y)
        // cond:
-       // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+       // result: (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMCMOVWHSconst)
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v.reset(OpARMDIVU)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
                v0.AddArg(x)
-               v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v1.AuxInt = 256
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
                v1.AddArg(y)
                v.AddArg(v1)
-               v.AuxInt = 0
                return true
        }
 }
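
ARM provides only 32-bit DIV and DIVU, so the 8- and 16-bit divisions above widen their operands first: signed variants sign-extend (Div16, Div8) and unsigned variants zero-extend (Div16u, Div8u), with the 32-bit quotient truncating back to the narrow type. A quick check of that equivalence in plain Go (the helper names are illustrative, not from the compiler):

    package main

    import "fmt"

    // div16 mirrors (Div16 x y) -> (DIV (SignExt16to32 x) (SignExt16to32 y)):
    // widen to int32, divide, truncate back to 16 bits.
    func div16(x, y int16) int16 { return int16(int32(x) / int32(y)) }

    // div16u mirrors (Div16u x y) -> (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)).
    func div16u(x, y uint16) uint16 { return uint16(uint32(x) / uint32(y)) }

    func main() {
        fmt.Println(div16(-7, 2))      // -3: sign extension keeps truncated division
        fmt.Println(div16u(0xFFFF, 3)) // 21845: zero extension keeps the full range
    }
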
-func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
+func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux64 x (Const64 [c]))
-       // cond: uint64(c) < 32
-       // result: (SRLconst x [c])
+       // match: (Eq16 x y)
+       // cond:
+       // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 32) {
-                       break
-               }
-               v.reset(OpARMSRLconst)
-               v.AddArg(x)
-               v.AuxInt = c
-               return true
-       }
-       // match: (Rsh32Ux64 _ (Const64 [c]))
-       // cond: uint64(c) >= 32
-       // result: (Const32 [0])
-       for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
-                       break
-               }
-               v.reset(OpConst32)
-               v.AuxInt = 0
+               y := v.Args[1]
+               v.reset(OpARMEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
+func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32Ux8  x y)
+       // match: (Eq32 x y)
        // cond:
-       // result: (SRL x (ZeroExt8to32 y))
+       // result: (Equal (CMP x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRL)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v.reset(OpARMEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x16 x y)
+       // match: (Eq32F x y)
        // cond:
-       // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+       // result: (Equal (CMPF x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRAcond)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v.reset(OpARMEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v1.AuxInt = 256
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v1.AddArg(v2)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x32 x y)
+       // match: (Eq64F x y)
        // cond:
-       // result: (SRAcond x y (CMPconst [256] y))
+       // result: (Equal (CMPD x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRAcond)
-               v.AddArg(x)
-               v.AddArg(y)
-               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v0.AuxInt = 256
+               v.reset(OpARMEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
+func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x64 x (Const64 [c]))
-       // cond: uint64(c) < 32
-       // result: (SRAconst x [c])
+       // match: (Eq8 x y)
+       // cond:
+       // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 32) {
-                       break
-               }
-               v.reset(OpARMSRAconst)
-               v.AddArg(x)
-               v.AuxInt = c
+               y := v.Args[1]
+               v.reset(OpARMEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       // match: (Rsh32x64 x (Const64 [c]))
-       // cond: uint64(c) >= 32
-       // result: (SRAconst x [31])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
-                       break
-               }
-               v.reset(OpARMSRAconst)
-               v.AddArg(x)
-               v.AuxInt = 31
+}
+func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (EqB x y)
+       // cond:
+       // result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMXORconst)
+               v.AuxInt = 1
+               v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeBool())
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh32x8  x y)
+       // match: (EqPtr x y)
        // cond:
-       // result: (SRA x (ZeroExt8to32 y))
+       // result: (Equal (CMP x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRA)
-               v.AddArg(x)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v.reset(OpARMEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux16 x y)
+       // match: (Geq16 x y)
        // cond:
-       // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+       // result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMCMOVWHSconst)
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
                v1.AddArg(x)
                v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
                v2.AddArg(y)
                v0.AddArg(v2)
                v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v3.AuxInt = 256
-               v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
-               v.AuxInt = 0
                return true
        }
 }
-func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux32 x y)
+       // match: (Geq16U x y)
        // cond:
-       // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+       // result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMCMOVWHSconst)
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v.reset(OpARMGreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v1.AddArg(x)
                v0.AddArg(v1)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v2.AuxInt = 256
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v2.AddArg(y)
-               v.AddArg(v2)
-               v.AuxInt = 0
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux64 x (Const64 [c]))
-       // cond: uint64(c) < 8
-       // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+       // match: (Geq32 x y)
+       // cond:
+       // result: (GreaterEqual (CMP x y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 8) {
-                       break
-               }
-               v.reset(OpARMSRLconst)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
                v0.AddArg(x)
-               v0.AuxInt = 24
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AuxInt = c + 24
                return true
        }
-       // match: (Rsh8Ux64 _ (Const64 [c]))
-       // cond: uint64(c) >= 8
-       // result: (Const8 [0])
+}
+func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Geq32F x y)
+       // cond:
+       // result: (GreaterEqual (CMPF x y))
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
-                       break
-               }
-               v.reset(OpConst8)
-               v.AuxInt = 0
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8Ux8  x y)
+       // match: (Geq32U x y)
        // cond:
-       // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+       // result: (GreaterEqualU (CMP x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRL)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v.reset(OpARMGreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
                v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x16 x y)
+       // match: (Geq64F x y)
        // cond:
-       // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+       // result: (GreaterEqual (CMPD x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRAcond)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
                v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v2.AuxInt = 256
-               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v3.AddArg(y)
-               v2.AddArg(v3)
-               v.AddArg(v2)
                return true
        }
 }
-func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x32 x y)
+       // match: (Geq8 x y)
        // cond:
-       // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+       // result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRAcond)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v0.AddArg(x)
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v.AddArg(y)
-               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-               v1.AuxInt = 256
-               v1.AddArg(y)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
+func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x64 x (Const64 [c]))
-       // cond: uint64(c) < 8
-       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
+       // match: (Geq8U x y)
+       // cond:
+       // result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 8) {
-                       break
-               }
-               v.reset(OpARMSRAconst)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v0.AuxInt = 24
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v.AuxInt = c + 24
                return true
        }
-       // match: (Rsh8x64 x (Const64 [c]))
-       // cond: uint64(c) >= 8
-       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
+}
+func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GetClosurePtr)
+       // cond:
+       // result: (LoweredGetClosurePtr)
+       for {
+               v.reset(OpARMLoweredGetClosurePtr)
+               return true
+       }
+}
+func rewriteValueARM_OpGoCall(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (GoCall [argwid] mem)
+       // cond:
+       // result: (CALLgo [argwid] mem)
+       for {
+               argwid := v.AuxInt
+               mem := v.Args[0]
+               v.reset(OpARMCALLgo)
+               v.AuxInt = argwid
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater16 x y)
+       // cond:
+       // result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpConst64 {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
-                       break
-               }
-               v.reset(OpARMSRAconst)
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v0.AuxInt = 24
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v.AuxInt = 31
                return true
        }
-       return false
 }
-func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
+func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Rsh8x8  x y)
+       // match: (Greater16U x y)
        // cond:
-       // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+       // result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSRA)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v0.AddArg(x)
+               v.reset(OpARMGreaterThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
+func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SBC (MOVWconst [c]) x flags)
+       // match: (Greater32 x y)
        // cond:
-       // result: (RSCconst [c] x flags)
+       // result: (GreaterThan (CMP x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMRSCconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               v.AddArg(flags)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SBC x (MOVWconst [c]) flags)
+}
+func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater32F x y)
        // cond:
-       // result: (SBCconst [c] x flags)
+       // result: (GreaterThan (CMPF x y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMSBCconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               v.AddArg(flags)
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SBC x (SLLconst [c] y) flags)
+}
+func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater32U x y)
        // cond:
-       // result: (SBCshiftLL x y [c] flags)
+       // result: (GreaterThanU (CMP x y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSLLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               y := v_1.Args[0]
-               flags := v.Args[2]
-               v.reset(OpARMSBCshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
+               y := v.Args[1]
+               v.reset(OpARMGreaterThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SBC (SLLconst [c] y) x flags)
+}
+func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater64F x y)
        // cond:
-       // result: (RSCshiftLL x y [c] flags)
+       // result: (GreaterThan (CMPD x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSLLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMRSCshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SBC x (SRLconst [c] y) flags)
+}
+func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater8 x y)
        // cond:
-       // result: (SBCshiftRL x y [c] flags)
+       // result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               y := v_1.Args[0]
-               flags := v.Args[2]
-               v.reset(OpARMSBCshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       // match: (SBC (SRLconst [c] y) x flags)
+}
+func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Greater8U x y)
        // cond:
-       // result: (RSCshiftRL x y [c] flags)
+       // result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMRSCshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       // match: (SBC x (SRAconst [c] y) flags)
+}
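
Every comparison lowering above has the same two-step shape: a flags-producing compare (CMP, CMPF, or CMPD) feeds a condition-materializing op such as Equal, GreaterThan, or GreaterEqualU, and sub-word operands are widened first with an extension matching the signedness of the comparison (SignExt for Geq16 and Greater8, ZeroExt for Geq16U and Greater8U). The widening is sound because it preserves order; a small check (function names are illustrative):

    package main

    import "fmt"

    // greater16 mirrors (Greater16 x y) ->
    // (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y))).
    func greater16(x, y int16) bool { return int32(x) > int32(y) }

    // greater16u mirrors (Greater16U x y), which zero-extends instead.
    func greater16u(x, y uint16) bool { return uint32(x) > uint32(y) }

    func main() {
        fmt.Println(greater16(-1, 1))      // false: sign extension keeps -1 negative
        fmt.Println(greater16u(0xFFFF, 1)) // true: zero extension keeps 65535 large
    }
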
+func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul16 x y)
        // cond:
-       // result: (SBCshiftRA x y [c] flags)
+       // result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRAconst {
-                       break
-               }
-               c := v_1.AuxInt
-               y := v_1.Args[0]
-               flags := v.Args[2]
-               v.reset(OpARMSBCshiftRA)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
+               y := v.Args[1]
+               v.reset(OpARMSRAconst)
+               v.AuxInt = 16
+               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       // match: (SBC (SRAconst [c] y) x flags)
+}
+func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul16u x y)
        // cond:
-       // result: (RSCshiftRA x y [c] flags)
+       // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRAconst {
-                       break
-               }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMRSCshiftRA)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRLconst)
+               v.AuxInt = 16
+               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       // match: (SBC x (SLL y z) flags)
+}
+func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul32 x y)
        // cond:
-       // result: (SBCshiftLLreg x y z flags)
+       // result: (HMUL x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSLL {
-                       break
-               }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMSBCshiftLLreg)
+               y := v.Args[1]
+               v.reset(OpARMHMUL)
                v.AddArg(x)
                v.AddArg(y)
-               v.AddArg(z)
-               v.AddArg(flags)
                return true
        }
-       // match: (SBC (SLL y z) x flags)
+}
+func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul32u x y)
        // cond:
-       // result: (RSCshiftLLreg x y z flags)
+       // result: (HMULU x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSLL {
-                       break
-               }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
-               x := v.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMRSCshiftLLreg)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMHMULU)
                v.AddArg(x)
                v.AddArg(y)
-               v.AddArg(z)
-               v.AddArg(flags)
                return true
        }
-       // match: (SBC x (SRL y z) flags)
+}
+func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul8 x y)
        // cond:
-       // result: (SBCshiftRLreg x y z flags)
+       // result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRL {
-                       break
-               }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMSBCshiftRLreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
-               v.AddArg(flags)
+               y := v.Args[1]
+               v.reset(OpARMSRAconst)
+               v.AuxInt = 8
+               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       // match: (SBC (SRL y z) x flags)
+}
+func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Hmul8u x y)
        // cond:
-       // result: (RSCshiftRLreg x y z flags)
+       // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRL {
-                       break
-               }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
-               x := v.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMRSCshiftRLreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
-               v.AddArg(flags)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRLconst)
+               v.AuxInt = 8
+               v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       // match: (SBC x (SRA y z) flags)
+}
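
The Hmul lowerings above compute a high-half multiply without a dedicated narrow instruction: extend both operands, take the full product with MUL, then shift right by the operand width, arithmetically for the signed forms and logically for the unsigned ones (the 32-bit cases get the dedicated HMUL/HMULU instead). The same arithmetic in plain Go (illustrative names):

    package main

    import "fmt"

    // hmul16 mirrors (Hmul16 x y) ->
    // (SRAconst (MUL (SignExt16to32 x) (SignExt16to32 y)) [16]).
    func hmul16(x, y int16) int16 { return int16((int32(x) * int32(y)) >> 16) }

    // hmul16u mirrors (Hmul16u x y), with zero extension and a logical shift.
    func hmul16u(x, y uint16) uint16 { return uint16((uint32(x) * uint32(y)) >> 16) }

    func main() {
        // 300*300 = 90000 = 1*65536 + 24464, so the signed high half is 1.
        fmt.Println(hmul16(300, 300))      // 1
        fmt.Println(hmul16u(60000, 60000)) // 54931
    }
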
+func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (InterCall [argwid] entry mem)
        // cond:
-       // result: (SBCshiftRAreg x y z flags)
+       // result: (CALLinter [argwid] entry mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRA {
-                       break
-               }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMSBCshiftRAreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
-               v.AddArg(flags)
+               argwid := v.AuxInt
+               entry := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMCALLinter)
+               v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(mem)
                return true
        }
-       // match: (SBC (SRA y z) x flags)
+}
+func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (IsInBounds idx len)
        // cond:
-       // result: (RSCshiftRAreg x y z flags)
+       // result: (LessThanU (CMP idx len))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRA {
-                       break
-               }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
-               x := v.Args[1]
-               flags := v.Args[2]
-               v.reset(OpARMRSCshiftRAreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
-               v.AddArg(flags)
+               idx := v.Args[0]
+               len := v.Args[1]
+               v.reset(OpARMLessThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(idx)
+               v0.AddArg(len)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SBCconst [c] (ADDconst [d] x) flags)
+       // match: (IsNonNil ptr)
        // cond:
-       // result: (SBCconst [int64(int32(c-d))] x flags)
+       // result: (NotEqual (CMPconst [0] ptr))
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               flags := v.Args[1]
-               v.reset(OpARMSBCconst)
-               v.AuxInt = int64(int32(c - d))
-               v.AddArg(x)
-               v.AddArg(flags)
+               ptr := v.Args[0]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v0.AuxInt = 0
+               v0.AddArg(ptr)
+               v.AddArg(v0)
                return true
        }
-       // match: (SBCconst [c] (SUBconst [d] x) flags)
+}
+func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (IsSliceInBounds idx len)
        // cond:
-       // result: (SBCconst [int64(int32(c+d))] x flags)
+       // result: (LessEqualU (CMP idx len))
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSUBconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               flags := v.Args[1]
-               v.reset(OpARMSBCconst)
-               v.AuxInt = int64(int32(c + d))
-               v.AddArg(x)
-               v.AddArg(flags)
+               idx := v.Args[0]
+               len := v.Args[1]
+               v.reset(OpARMLessEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(idx)
+               v0.AddArg(len)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq16 x y)
+       // cond:
+       // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq16U x y)
+       // cond:
+       // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SBCshiftLL (MOVWconst [c]) x [d] flags)
+       // match: (Leq32 x y)
        // cond:
-       // result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+       // result: (LessEqual (CMP x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMRSCconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
                v0.AddArg(x)
-               v0.AuxInt = d
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AddArg(flags)
                return true
        }
-       // match: (SBCshiftLL x (MOVWconst [c]) [d] flags)
+}
+func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq32F x y)
        // cond:
-       // result: (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
+       // result: (GreaterEqual (CMPF y x))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMSBCconst)
-               v.AddArg(x)
-               v.AuxInt = int64(uint32(c) << uint64(d))
-               v.AddArg(flags)
+               y := v.Args[1]
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
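
Note the operand swap in the float rule above: (Leq32F x y) lowers to GreaterEqual of (CMPF y x). For IEEE floats, x <= y and y >= x agree everywhere, including NaN operands, where both are false, so the swap is safe. A standalone model of that reasoning (leq32fModel is a hypothetical name):

// leq32fModel mirrors the Leq32F lowering: x <= y evaluated as y >= x
// over swapped CMPF operands; the two are equivalent for IEEE floats.
func leq32fModel(x, y float32) bool {
	return y >= x
}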
-func rewriteValueARM_OpARMSBCshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SBCshiftLLreg (MOVWconst [c]) x y flags)
+       // match: (Leq32U x y)
        // cond:
-       // result: (RSCconst [c] (SLL <x.Type> x y) flags)
+       // result: (LessEqualU (CMP x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               flags := v.Args[3]
-               v.reset(OpARMRSCconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v.AddArg(flags)
                return true
        }
-       // match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
+}
+func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq64F x y)
        // cond:
-       // result: (SBCshiftLL x y [c] flags)
+       // result: (GreaterEqual (CMPD y x))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               flags := v.Args[3]
-               v.reset(OpARMSBCshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
+               v.reset(OpARMGreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SBCshiftRA (MOVWconst [c]) x [d] flags)
+       // match: (Leq8 x y)
        // cond:
-       // result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+       // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMRSCconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v.AddArg(flags)
                return true
        }
-       // match: (SBCshiftRA x (MOVWconst [c]) [d] flags)
+}
+func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq8U x y)
        // cond:
-       // result: (SBCconst x [int64(int32(c)>>uint64(d))] flags)
+       // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMSBCconst)
-               v.AddArg(x)
-               v.AuxInt = int64(int32(c) >> uint64(d))
-               v.AddArg(flags)
+               y := v.Args[1]
+               v.reset(OpARMLessEqualU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSBCshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SBCshiftRAreg (MOVWconst [c]) x y flags)
+       // match: (Less16 x y)
        // cond:
-       // result: (RSCconst [c] (SRA <x.Type> x y) flags)
+       // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               flags := v.Args[3]
-               v.reset(OpARMRSCconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessThan)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v.AddArg(flags)
                return true
        }
-       // match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
+}
+func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less16U x y)
        // cond:
-       // result: (SBCshiftRA x y [c] flags)
+       // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               flags := v.Args[3]
-               v.reset(OpARMSBCshiftRA)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
+               v.reset(OpARMLessThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SBCshiftRL (MOVWconst [c]) x [d] flags)
+       // match: (Less32 x y)
        // cond:
-       // result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+       // result: (LessThan (CMP x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMRSCconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMLessThan)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less32F x y)
+       // cond:
+       // result: (GreaterThan (CMPF y x))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+               v0.AddArg(y)
                v0.AddArg(x)
-               v0.AuxInt = d
                v.AddArg(v0)
-               v.AddArg(flags)
                return true
        }
-       // match: (SBCshiftRL x (MOVWconst [c]) [d] flags)
+}
+func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less32U x y)
        // cond:
-       // result: (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
+       // result: (LessThanU (CMP x y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               flags := v.Args[2]
-               v.reset(OpARMSBCconst)
-               v.AddArg(x)
-               v.AuxInt = int64(uint32(c) >> uint64(d))
-               v.AddArg(flags)
+               y := v.Args[1]
+               v.reset(OpARMLessThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSBCshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SBCshiftRLreg (MOVWconst [c]) x y flags)
+       // match: (Less64F x y)
        // cond:
-       // result: (RSCconst [c] (SRL <x.Type> x y) flags)
+       // result: (GreaterThan (CMPD y x))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               flags := v.Args[3]
-               v.reset(OpARMRSCconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-               v0.AddArg(x)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMGreaterThan)
+               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
                v0.AddArg(y)
+               v0.AddArg(x)
                v.AddArg(v0)
-               v.AddArg(flags)
                return true
        }
-       // match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
+}
+func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Less8 x y)
        // cond:
-       // result: (SBCshiftRL x y [c] flags)
+       // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               flags := v.Args[3]
-               v.reset(OpARMSBCshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               v.AddArg(flags)
+               v.reset(OpARMLessThan)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SLL x (MOVWconst [c]))
+       // match: (Less8U x y)
        // cond:
-       // result: (SLLconst x [c&31])
+       // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpARMSLLconst)
-               v.AddArg(x)
-               v.AuxInt = c & 31
+               y := v.Args[1]
+               v.reset(OpARMLessThanU)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSLLconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SLLconst [c] (MOVWconst [d]))
-       // cond:
-       // result: (MOVWconst [int64(uint32(d)<<uint64(c))])
+       // match: (Load <t> ptr mem)
+       // cond: t.IsBoolean()
+       // result: (MOVBUload ptr mem)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(t.IsBoolean()) {
                        break
                }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(uint32(d) << uint64(c))
+               v.reset(OpARMMOVBUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SRA x (MOVWconst [c]))
-       // cond:
-       // result: (SRAconst x [c&31])
+       // match: (Load <t> ptr mem)
+       // cond: (is8BitInt(t) && isSigned(t))
+       // result: (MOVBload ptr mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is8BitInt(t) && isSigned(t)) {
                        break
                }
-               c := v_1.AuxInt
-               v.reset(OpARMSRAconst)
-               v.AddArg(x)
-               v.AuxInt = c & 31
+               v.reset(OpARMMOVBload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SRAcond x _ (FlagEQ))
-       // cond:
-       // result: (SRAconst x [31])
+       // match: (Load <t> ptr mem)
+       // cond: (is8BitInt(t) && !isSigned(t))
+       // result: (MOVBUload ptr mem)
        for {
-               x := v.Args[0]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMFlagEQ {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is8BitInt(t) && !isSigned(t)) {
                        break
                }
-               v.reset(OpARMSRAconst)
-               v.AddArg(x)
-               v.AuxInt = 31
+               v.reset(OpARMMOVBUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (SRAcond x y (FlagLT_ULT))
-       // cond:
-       // result: (SRA x y)
+       // match: (Load <t> ptr mem)
+       // cond: (is16BitInt(t) && isSigned(t))
+       // result: (MOVHload ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMFlagLT_ULT {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is16BitInt(t) && isSigned(t)) {
                        break
                }
-               v.reset(OpARMSRA)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARMMOVHload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (SRAcond x _ (FlagLT_UGT))
-       // cond:
-       // result: (SRAconst x [31])
+       // match: (Load <t> ptr mem)
+       // cond: (is16BitInt(t) && !isSigned(t))
+       // result: (MOVHUload ptr mem)
        for {
-               x := v.Args[0]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMFlagLT_UGT {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is16BitInt(t) && !isSigned(t)) {
                        break
                }
-               v.reset(OpARMSRAconst)
-               v.AddArg(x)
-               v.AuxInt = 31
+               v.reset(OpARMMOVHUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (SRAcond x y (FlagGT_ULT))
-       // cond:
-       // result: (SRA x y)
+       // match: (Load <t> ptr mem)
+       // cond: (is32BitInt(t) || isPtr(t))
+       // result: (MOVWload ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMFlagGT_ULT {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitInt(t) || isPtr(t)) {
                        break
                }
-               v.reset(OpARMSRA)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARMMOVWload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (SRAcond x _ (FlagGT_UGT))
-       // cond:
-       // result: (SRAconst x [31])
+       // match: (Load <t> ptr mem)
+       // cond: is32BitFloat(t)
+       // result: (MOVFload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitFloat(t)) {
+                       break
+               }
+               v.reset(OpARMMOVFload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: is64BitFloat(t)
+       // result: (MOVDload ptr mem)
        for {
-               x := v.Args[0]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMFlagGT_UGT {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is64BitFloat(t)) {
                        break
                }
-               v.reset(OpARMSRAconst)
-               v.AddArg(x)
-               v.AuxInt = 31
+               v.reset(OpARMMOVDload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
        return false
 }
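
The eight Load rules above form a type-directed table: booleans and unsigned sub-word types take zero-extending loads, signed sub-word types take sign-extending ones, 32-bit ints and pointers share MOVWload, and floats get MOVFload/MOVDload. A standalone model of the selection (loadKindModel and its parameters are illustrative, not the compiler's API):

// loadKindModel condenses the Load rules above into one decision.
func loadKindModel(sizeBits int, signed, isFloat bool) string {
	switch {
	case isFloat && sizeBits == 32:
		return "MOVFload"
	case isFloat:
		return "MOVDload" // 64-bit float
	case sizeBits == 8 && signed:
		return "MOVBload"
	case sizeBits == 8:
		return "MOVBUload" // also used for booleans
	case sizeBits == 16 && signed:
		return "MOVHload"
	case sizeBits == 16:
		return "MOVHUload"
	default:
		return "MOVWload" // 32-bit ints and pointers
	}
}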
-func rewriteValueARM_OpARMSRAconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SRAconst [c] (MOVWconst [d]))
+       // match: (Lrot16 <t> x [c])
        // cond:
-       // result: (MOVWconst [int64(int32(d)>>uint64(c))])
+       // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
        for {
+               t := v.Type
                c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int32(d) >> uint64(c))
+               x := v.Args[0]
+               v.reset(OpARMOR)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+               v0.AuxInt = c & 15
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+               v1.AuxInt = 16 - c&15
+               v1.AddArg(x)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
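
Only the 32-bit rotate maps to a single instruction (SRRconst, used by Lrot32 below); Lrot16 is assembled from two shifts OR'd together. A standalone model, assuming a rotate count with 0 < c&15 as the generic op appears to guarantee (lrot16Model is a hypothetical name):

// lrot16Model mirrors (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15])).
func lrot16Model(x uint16, c uint) uint16 {
	return x<<(c&15) | x>>(16-c&15)
}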
-func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SRL x (MOVWconst [c]))
+       // match: (Lrot32 x [c])
        // cond:
-       // result: (SRLconst x [c&31])
+       // result: (SRRconst x [32-c&31])
        for {
+               c := v.AuxInt
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpARMSRLconst)
+               v.reset(OpARMSRRconst)
+               v.AuxInt = 32 - c&31
                v.AddArg(x)
-               v.AuxInt = c & 31
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSRLconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SRLconst [c] (MOVWconst [d]))
+       // match: (Lrot8 <t> x [c])
        // cond:
-       // result: (MOVWconst [int64(uint32(d)>>uint64(c))])
+       // result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
        for {
+               t := v.Type
                c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(uint32(d) >> uint64(c))
+               x := v.Args[0]
+               v.reset(OpARMOR)
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
+               v0.AuxInt = c & 7
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
+               v1.AuxInt = 8 - c&7
+               v1.AddArg(x)
+               v.AddArg(v1)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUB (MOVWconst [c]) x)
+       // match: (Lsh16x16 x y)
        // cond:
-       // result: (RSBconst [c] x)
+       // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v.AuxInt = 0
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v.AddArg(v2)
                return true
        }
-       // match: (SUB x (MOVWconst [c]))
+}
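
The pattern above is the general guard for variable shifts. ARM's SLL honors only the bottom byte of the shift register, and counts of 32..255 already shift out every bit; the CMPconst [256] plus CMOVWHSconst [0] pair additionally forces the result to zero whenever the widened count is 256 or more, where the bottom byte would alias a small shift. A standalone model of the combined semantics (lsh16x16Model is a hypothetical name; the bottom-byte behavior is ARM's documented register-shift semantics):

// lsh16x16Model mirrors the CMOVWHSconst pattern for a value kept in a
// 32-bit register.
func lsh16x16Model(x uint32, y uint16) uint32 {
	if uint32(y) >= 256 { // CMPconst [256] (ZeroExt16to32 y); HS holds
		return 0 // CMOVWHSconst [0]
	}
	if y >= 32 { // SLL by register: counts 32..255 shift out all bits
		return 0
	}
	return x << y
}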
+func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x32 x y)
        // cond:
-       // result: (SUBconst [c] x)
+       // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpARMSUBconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v.AuxInt = 0
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (SUB x (SLLconst [c] y))
-       // cond:
-       // result: (SUBshiftLL x y [c])
+}
+func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x64 x (Const64 [c]))
+       // cond: uint64(c) < 16
+       // result: (SLLconst x [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSLLconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               y := v_1.Args[0]
-               v.reset(OpARMSUBshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               return true
-       }
-       // match: (SUB (SLLconst [c] y) x)
-       // cond:
-       // result: (RSBshiftLL x y [c])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSLLconst {
+               if !(uint64(c) < 16) {
                        break
                }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               v.reset(OpARMRSBshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARMSLLconst)
                v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (SUB x (SRLconst [c] y))
-       // cond:
-       // result: (SUBshiftRL x y [c])
+       // match: (Lsh16x64 _ (Const64 [c]))
+       // cond: uint64(c) >= 16
+       // result: (Const16 [0])
        for {
-               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSRLconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               y := v_1.Args[0]
-               v.reset(OpARMSUBshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               return true
-       }
-       // match: (SUB (SRLconst [c] y) x)
-       // cond:
-       // result: (RSBshiftRL x y [c])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRLconst {
+               if !(uint64(c) >= 16) {
                        break
                }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               v.reset(OpARMRSBshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               v.reset(OpConst16)
+               v.AuxInt = 0
                return true
        }
-       // match: (SUB x (SRAconst [c] y))
+       return false
+}
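
With a Const64 shift count no runtime guard is needed: the two rules above split on the count in the match condition, yielding either a plain SLLconst or a constant zero. A standalone model (lsh16x64Model is a hypothetical name):

// lsh16x64Model mirrors the two Lsh16x64 rules above.
func lsh16x64Model(x uint16, c uint64) uint16 {
	if c < 16 {
		return x << c // (SLLconst x [c])
	}
	return 0 // (Const16 [0])
}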
+func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x8  x y)
        // cond:
-       // result: (SUBshiftRA x y [c])
+       // result: (SLL x (ZeroExt8to32 y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRAconst {
-                       break
-               }
-               c := v_1.AuxInt
-               y := v_1.Args[0]
-               v.reset(OpARMSUBshiftRA)
+               y := v.Args[1]
+               v.reset(OpARMSLL)
                v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SUB (SRAconst [c] y) x)
+}
+func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x16 x y)
        // cond:
-       // result: (RSBshiftRA x y [c])
+       // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRAconst {
-                       break
-               }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               v.reset(OpARMRSBshiftRA)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v.AuxInt = 0
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v.AddArg(v2)
                return true
        }
-       // match: (SUB x (SLL y z))
+}
+func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x32 x y)
        // cond:
-       // result: (SUBshiftLLreg x y z)
+       // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSLL {
-                       break
-               }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               v.reset(OpARMSUBshiftLLreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
-               return true
-       }
-       // match: (SUB (SLL y z) x)
-       // cond:
-       // result: (RSBshiftLLreg x y z)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSLL {
-                       break
-               }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
-               x := v.Args[1]
-               v.reset(OpARMRSBshiftLLreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v.AuxInt = 0
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (SUB x (SRL y z))
-       // cond:
-       // result: (SUBshiftRLreg x y z)
+}
+func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x64 x (Const64 [c]))
+       // cond: uint64(c) < 32
+       // result: (SLLconst x [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSRL {
+               if v_1.Op != OpConst64 {
                        break
                }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               v.reset(OpARMSUBshiftRLreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
-               return true
-       }
-       // match: (SUB (SRL y z) x)
-       // cond:
-       // result: (RSBshiftRLreg x y z)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRL {
+               c := v_1.AuxInt
+               if !(uint64(c) < 32) {
                        break
                }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
-               x := v.Args[1]
-               v.reset(OpARMRSBshiftRLreg)
+               v.reset(OpARMSLLconst)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
                return true
        }
-       // match: (SUB x (SRA y z))
-       // cond:
-       // result: (SUBshiftRAreg x y z)
+       // match: (Lsh32x64 _ (Const64 [c]))
+       // cond: uint64(c) >= 32
+       // result: (Const32 [0])
        for {
-               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSRA {
+               if v_1.Op != OpConst64 {
                        break
                }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               v.reset(OpARMSUBshiftRAreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               c := v_1.AuxInt
+               if !(uint64(c) >= 32) {
+                       break
+               }
+               v.reset(OpConst32)
+               v.AuxInt = 0
                return true
        }
-       // match: (SUB (SRA y z) x)
+       return false
+}
+func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh32x8  x y)
        // cond:
-       // result: (RSBshiftRAreg x y z)
+       // result: (SLL x (ZeroExt8to32 y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRA {
-                       break
-               }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
-               x := v.Args[1]
-               v.reset(OpARMRSBshiftRAreg)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSLL)
                v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SUB x x)
+}
+func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x16 x y)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
        for {
                x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
                v.AuxInt = 0
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v.AddArg(v2)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
+func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBS (MOVWconst [c]) x)
+       // match: (Lsh8x32 x y)
        // cond:
-       // result: (RSBSconst [c] x)
+       // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpARMRSBSconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v.AuxInt = 0
+               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (SUBS x (MOVWconst [c]))
-       // cond:
-       // result: (SUBSconst [c] x)
+}
+func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x64 x (Const64 [c]))
+       // cond: uint64(c) < 8
+       // result: (SLLconst x [c])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               v.reset(OpARMSUBSconst)
+               if !(uint64(c) < 8) {
+                       break
+               }
+               v.reset(OpARMSLLconst)
                v.AuxInt = c
                v.AddArg(x)
                return true
        }
-       // match: (SUBS x (SLLconst [c] y))
-       // cond:
-       // result: (SUBSshiftLL x y [c])
+       // match: (Lsh8x64 _ (Const64 [c]))
+       // cond: uint64(c) >= 8
+       // result: (Const8 [0])
        for {
-               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSLLconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               y := v_1.Args[0]
-               v.reset(OpARMSUBSshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               return true
-       }
-       // match: (SUBS (SLLconst [c] y) x)
-       // cond:
-       // result: (RSBSshiftLL x y [c])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSLLconst {
+               if !(uint64(c) >= 8) {
                        break
                }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               v.reset(OpARMRSBSshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               v.reset(OpConst8)
+               v.AuxInt = 0
                return true
        }
-       // match: (SUBS x (SRLconst [c] y))
+       return false
+}
+func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x8  x y)
        // cond:
-       // result: (SUBSshiftRL x y [c])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               y := v_1.Args[0]
-               v.reset(OpARMSUBSshiftRL)
+       // result: (SLL x (ZeroExt8to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSLL)
                v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (SUBS (SRLconst [c] y) x)
+}
+func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16 x y)
        // cond:
-       // result: (RSBSshiftRL x y [c])
+       // result: (MOD (SignExt16to32 x) (SignExt16to32 y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               v.reset(OpARMRSBSshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMOD)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (SUBS x (SRAconst [c] y))
+}
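
As with the other sub-word ops in this file, Mod16 widens first: both operands are sign-extended to 32 bits and the 32-bit MOD does the work. A standalone model (mod16Model is a hypothetical name; like the real op, it faults on y == 0):

// mod16Model mirrors (MOD (SignExt16to32 x) (SignExt16to32 y)).
func mod16Model(x, y int16) int16 {
	return int16(int32(x) % int32(y))
}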
+func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod16u x y)
        // cond:
-       // result: (SUBSshiftRA x y [c])
+       // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRAconst {
-                       break
-               }
-               c := v_1.AuxInt
-               y := v_1.Args[0]
-               v.reset(OpARMSUBSshiftRA)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               y := v.Args[1]
+               v.reset(OpARMMODU)
+               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (SUBS (SRAconst [c] y) x)
+}
+func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32 x y)
        // cond:
-       // result: (RSBSshiftRA x y [c])
+       // result: (MOD x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRAconst {
-                       break
-               }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               v.reset(OpARMRSBSshiftRA)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMOD)
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       // match: (SUBS x (SLL y z))
+}
+func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod32u x y)
        // cond:
-       // result: (SUBSshiftLLreg x y z)
+       // result: (MODU x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSLL {
-                       break
-               }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               v.reset(OpARMSUBSshiftLLreg)
+               y := v.Args[1]
+               v.reset(OpARMMODU)
                v.AddArg(x)
                v.AddArg(y)
-               v.AddArg(z)
                return true
        }
-       // match: (SUBS (SLL y z) x)
+}
+func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8 x y)
        // cond:
-       // result: (RSBSshiftLLreg x y z)
+       // result: (MOD (SignExt8to32 x) (SignExt8to32 y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSLL {
-                       break
-               }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
-               x := v.Args[1]
-               v.reset(OpARMRSBSshiftLLreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMOD)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (SUBS x (SRL y z))
+}
+func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mod8u x y)
        // cond:
-       // result: (SUBSshiftRLreg x y z)
+       // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRL {
+               y := v.Args[1]
+               v.reset(OpARMMODU)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
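
The Move rules that follow expand small copies into the widest load/store pairs the size and alignment permit, falling back to byte-sized pieces, with each store threading the previous store's memory value. A hedged illustration of the 4-byte, 2-byte-aligned case, ignoring the SSA memory threading (move4align2Model is a hypothetical name):

// move4align2Model sketches the MOVHstore/MOVHUload expansion below:
// a 4-byte copy performed as two halfword copies at offsets 0 and 2.
func move4align2Model(dst, src []byte) {
	copy(dst[0:2], src[0:2]) // MOVHstore dst (MOVHUload src mem) mem
	copy(dst[2:4], src[2:4]) // MOVHstore [2] dst (MOVHUload [2] src mem) ...
}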
+func rewriteValueARM_OpMove(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Move [s] _ _ mem)
+       // cond: SizeAndAlign(s).Size() == 0
+       // result: mem
+       for {
+               s := v.AuxInt
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 0) {
                        break
                }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               v.reset(OpARMSUBSshiftRLreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               v.reset(OpCopy)
+               v.Type = mem.Type
+               v.AddArg(mem)
                return true
        }
-       // match: (SUBS (SRL y z) x)
-       // cond:
-       // result: (RSBSshiftRLreg x y z)
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 1
+       // result: (MOVBstore dst (MOVBUload src mem) mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRL {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 1) {
                        break
                }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
-               x := v.Args[1]
-               v.reset(OpARMRSBSshiftRLreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               v.reset(OpARMMOVBstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
-       // match: (SUBS x (SRA y z))
-       // cond:
-       // result: (SUBSshiftRAreg x y z)
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore dst (MOVHUload src mem) mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRA {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
                        break
                }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               v.reset(OpARMSUBSshiftRAreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               v.reset(OpARMMOVHstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
-       // match: (SUBS (SRA y z) x)
-       // cond:
-       // result: (RSBSshiftRAreg x y z)
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2
+       // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRA {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 2) {
                        break
                }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
-               x := v.Args[1]
-               v.reset(OpARMRSBSshiftRAreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = 1
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v0.AuxInt = 1
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SUBSshiftLL (MOVWconst [c]) x [d])
-       // cond:
-       // result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore dst (MOVWload src mem) mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMRSBSconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
+               v.reset(OpARMMOVWstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32())
+               v0.AddArg(src)
+               v0.AddArg(mem)
                v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
-       // match: (SUBSshiftLL x (MOVWconst [c]) [d])
-       // cond:
-       // result: (SUBSconst x [int64(uint32(c)<<uint64(d))])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
                        break
                }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMSUBSconst)
-               v.AddArg(x)
-               v.AuxInt = int64(uint32(c) << uint64(d))
+               v.reset(OpARMMOVHstore)
+               v.AuxInt = 2
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+               v0.AuxInt = 2
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SUBSshiftLLreg (MOVWconst [c]) x y)
-       // cond:
-       // result: (RSBSconst [c] (SLL <x.Type> x y))
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4
+       // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 4) {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMRSBSconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = 3
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v0.AuxInt = 3
+               v0.AddArg(src)
+               v0.AddArg(mem)
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+               v1.AuxInt = 2
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v2.AuxInt = 2
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+               v3.AuxInt = 1
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v4.AuxInt = 1
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+               v5.AddArg(dst)
+               v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v6.AddArg(src)
+               v6.AddArg(mem)
+               v5.AddArg(v6)
+               v5.AddArg(mem)
+               v3.AddArg(v5)
+               v1.AddArg(v3)
+               v.AddArg(v1)
                return true
        }
-       // match: (SUBSshiftLLreg x y (MOVWconst [c]))
-       // cond:
-       // result: (SUBSshiftLL x y [c])
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 3
+       // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 3) {
                        break
                }
-               c := v_2.AuxInt
-               v.reset(OpARMSUBSshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               v.reset(OpARMMOVBstore)
+               v.AuxInt = 2
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v0.AuxInt = 2
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+               v1.AuxInt = 1
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v2.AuxInt = 1
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
+               v3.AddArg(dst)
+               v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
+               v4.AddArg(src)
+               v4.AddArg(mem)
+               v3.AddArg(v4)
+               v3.AddArg(mem)
+               v1.AddArg(v3)
+               v.AddArg(v1)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (SUBSshiftRA (MOVWconst [c]) x [d])
-       // cond:
-       // result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
+       // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMRSBSconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
+               v.reset(OpARMDUFFCOPY)
+               v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4))
+               v.AddArg(dst)
+               v.AddArg(src)
+               v.AddArg(mem)
                return true
        }
-       // match: (SUBSshiftRA x (MOVWconst [c]) [d])
-       // cond:
-       // result: (SUBSconst x [int64(int32(c)>>uint64(d))])
+       // match: (Move [s] dst src mem)
+       // cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0
+       // result: (LoweredMove [SizeAndAlign(s).Align()] dst src (ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0) {
                        break
                }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMSUBSconst)
-               v.AddArg(x)
-               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.reset(OpARMLoweredMove)
+               v.AuxInt = SizeAndAlign(s).Align()
+               v.AddArg(dst)
+               v.AddArg(src)
+               v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
+               v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+               v0.AddArg(src)
+               v.AddArg(v0)
+               v.AddArg(mem)
                return true
        }
        return false
 }
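
The Move rules above expand small copies inline (byte or halfword loads and stores threaded through the memory chain), use DUFFCOPY for mid-sized word-aligned copies, and fall back to LoweredMove otherwise. For reference, a plain-Go sketch of two of these lowerings; the names move3 and duffcopyEntry are illustrative only and not part of this CL:

    // Sketch only: behavioral equivalents of two Move rules above.

    // move3 mirrors the SizeAndAlign(s).Size() == 3 rule. The memory chain
    // runs innermost-out: the offset-0 store executes first, then 1, then 2.
    func move3(dst, src []byte) {
        dst[0] = src[0]
        dst[1] = src[1]
        dst[2] = src[2]
    }

    // duffcopyEntry mirrors the DUFFCOPY AuxInt. Each word copied costs 8
    // bytes of code in the 128-word device, so an n-byte copy (n%4 == 0,
    // 4 < n <= 512) enters at byte offset 8 * (128 - n/4); a 16-byte copy
    // enters at 992, leaving only the final 4 word copies to run.
    func duffcopyEntry(n int64) int64 {
        return 8 * (128 - n/4)
    }
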
-func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBSshiftRAreg (MOVWconst [c]) x y)
-       // cond:
-       // result: (RSBSconst [c] (SRA <x.Type> x y))
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMRSBSconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               return true
-       }
-       // match: (SUBSshiftRAreg x y (MOVWconst [c]))
+       // match: (Mul16 x y)
        // cond:
-       // result: (SUBSshiftRA x y [c])
+       // result: (MUL x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               v.reset(OpARMSUBSshiftRA)
+               v.reset(OpARMMUL)
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBSshiftRL (MOVWconst [c]) x [d])
-       // cond:
-       // result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMRSBSconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
-               return true
-       }
-       // match: (SUBSshiftRL x (MOVWconst [c]) [d])
+       // match: (Mul32 x y)
        // cond:
-       // result: (SUBSconst x [int64(uint32(c)>>uint64(d))])
+       // result: (MUL x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMSUBSconst)
+               y := v.Args[1]
+               v.reset(OpARMMUL)
                v.AddArg(x)
-               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBSshiftRLreg (MOVWconst [c]) x y)
+       // match: (Mul32F x y)
        // cond:
-       // result: (RSBSconst [c] (SRL <x.Type> x y))
+       // result: (MULF x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMRSBSconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMULF)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SUBSshiftRLreg x y (MOVWconst [c]))
+}
+func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul32uhilo x y)
        // cond:
-       // result: (SUBSshiftRL x y [c])
+       // result: (MULLU x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               v.reset(OpARMSUBSshiftRL)
+               v.reset(OpARMMULLU)
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       return false
 }
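
Mul32uhilo maps to MULLU, which yields the full 64-bit product in a register pair. A sketch of the semantics (the helper name is illustrative):

    // Sketch only: what MULLU computes for Mul32uhilo.
    func mul32uhilo(x, y uint32) (hi, lo uint32) {
        p := uint64(x) * uint64(y) // full 64-bit product
        return uint32(p >> 32), uint32(p)
    }
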
-func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBconst [0] x)
+       // match: (Mul64F x y)
        // cond:
-       // result: x
+       // result: (MULD x y)
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
+               y := v.Args[1]
+               v.reset(OpARMMULD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SUBconst [c] (MOVWconst [d]))
+}
+func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul8 x y)
        // cond:
-       // result: (MOVWconst [int64(int32(d-c))])
+       // result: (MUL x y)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = int64(int32(d - c))
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMMUL)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (SUBconst [c] (SUBconst [d] x))
+}
+func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg16 x)
        // cond:
-       // result: (ADDconst [int64(int32(-c-d))] x)
+       // result: (RSBconst [0] x)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSUBconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMADDconst)
-               v.AuxInt = int64(int32(-c - d))
+               x := v.Args[0]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = 0
                v.AddArg(x)
                return true
        }
-       // match: (SUBconst [c] (ADDconst [d] x))
+}
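
ARM has no single-operand integer negate, so Neg8/Neg16/Neg32 lower to a reverse subtract from zero (RSBconst [0]). Sketch (illustrative name):

    // Sketch only: RSBconst [0] x computes 0 - x.
    func neg32(x int32) int32 {
        return 0 - x // RSB reverses the operand order of SUB
    }
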
+func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32 x)
        // cond:
-       // result: (ADDconst [int64(int32(-c+d))] x)
+       // result: (RSBconst [0] x)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMADDconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMADDconst)
-               v.AuxInt = int64(int32(-c + d))
+               x := v.Args[0]
+               v.reset(OpARMRSBconst)
+               v.AuxInt = 0
                v.AddArg(x)
                return true
        }
-       // match: (SUBconst [c] (RSBconst [d] x))
+}
+func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32F x)
        // cond:
-       // result: (RSBconst [int64(int32(-c+d))] x)
+       // result: (NEGF x)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMRSBconst {
-                       break
-               }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = int64(int32(-c + d))
+               x := v.Args[0]
+               v.reset(OpARMNEGF)
                v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBshiftLL (MOVWconst [c]) x [d])
+       // match: (Neg64F x)
        // cond:
-       // result: (RSBconst [c] (SLLconst <x.Type> x [d]))
+       // result: (NEGD x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMRSBconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
+               x := v.Args[0]
+               v.reset(OpARMNEGD)
+               v.AddArg(x)
                return true
        }
-       // match: (SUBshiftLL x (MOVWconst [c]) [d])
+}
+func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg8 x)
        // cond:
-       // result: (SUBconst x [int64(uint32(c)<<uint64(d))])
+       // result: (RSBconst [0] x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMSUBconst)
+               v.reset(OpARMRSBconst)
+               v.AuxInt = 0
                v.AddArg(x)
-               v.AuxInt = int64(uint32(c) << uint64(d))
                return true
        }
-       // match: (SUBshiftLL x (SLLconst x [c]) [d])
-       // cond: c==d
-       // result: (MOVWconst [0])
+}
+func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq16 x y)
+       // cond:
+       // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSLLconst {
-                       break
-               }
-               if x != v_1.Args[0] {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               if !(c == d) {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v.Args[1]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
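
Neq8 and Neq16 zero-extend both operands before the 32-bit CMP, so stale high bits in the registers cannot affect the flags. Sketch (illustrative name):

    // Sketch only: Neq16 compares the zero-extended values.
    func neq16(x, y uint16) bool {
        return uint32(x) != uint32(y) // CMP (ZeroExt16to32 x) (ZeroExt16to32 y)
    }
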
-func rewriteValueARM_OpARMSUBshiftLLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBshiftLLreg (MOVWconst [c]) x y)
+       // match: (Neq32 x y)
        // cond:
-       // result: (RSBconst [c] (SLL <x.Type> x y))
+       // result: (NotEqual (CMP x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
-       // match: (SUBshiftLLreg x y (MOVWconst [c]))
+}
+func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq32F x y)
        // cond:
-       // result: (SUBshiftLL x y [c])
+       // result: (NotEqual (CMPF x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               v.reset(OpARMSUBshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBshiftRA (MOVWconst [c]) x [d])
+       // match: (Neq64F x y)
        // cond:
-       // result: (RSBconst [c] (SRAconst <x.Type> x [d]))
+       // result: (NotEqual (CMPD x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMRSBconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
                v0.AddArg(x)
-               v0.AuxInt = d
+               v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
-       // match: (SUBshiftRA x (MOVWconst [c]) [d])
-       // cond:
-       // result: (SUBconst x [int64(int32(c)>>uint64(d))])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMSUBconst)
-               v.AddArg(x)
-               v.AuxInt = int64(int32(c) >> uint64(d))
-               return true
-       }
-       // match: (SUBshiftRA x (SRAconst x [c]) [d])
-       // cond: c==d
-       // result: (MOVWconst [0])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRAconst {
-                       break
-               }
-               if x != v_1.Args[0] {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               if !(c == d) {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
-               return true
-       }
-       return false
 }
-func rewriteValueARM_OpARMSUBshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBshiftRAreg (MOVWconst [c]) x y)
+       // match: (Neq8 x y)
        // cond:
-       // result: (RSBconst [c] (SRA <x.Type> x y))
+       // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
                return true
        }
-       // match: (SUBshiftRAreg x y (MOVWconst [c]))
+}
+func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NeqB x y)
        // cond:
-       // result: (SUBshiftRA x y [c])
+       // result: (XOR x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               v.reset(OpARMSUBshiftRA)
+               v.reset(OpARMXOR)
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       return false
 }
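
With booleans materialized as 0 or 1, XOR is exactly inequality, which is why NeqB needs no compare. Sketch (illustrative name):

    // Sketch only: for x, y in {0, 1}, x ^ y is 1 iff they differ.
    func neqB(x, y uint8) uint8 {
        return x ^ y
    }
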
-func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBshiftRL (MOVWconst [c]) x [d])
+       // match: (NeqPtr x y)
        // cond:
-       // result: (RSBconst [c] (SRLconst <x.Type> x [d]))
+       // result: (NotEqual (CMP x y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMRSBconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMNotEqual)
+               v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
                v0.AddArg(x)
-               v0.AuxInt = d
+               v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
-       // match: (SUBshiftRL x (MOVWconst [c]) [d])
+}
+func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NilCheck ptr mem)
        // cond:
-       // result: (SUBconst x [int64(uint32(c)>>uint64(d))])
+       // result: (LoweredNilCheck ptr mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMSUBconst)
-               v.AddArg(x)
-               v.AuxInt = int64(uint32(c) >> uint64(d))
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARMLoweredNilCheck)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (SUBshiftRL x (SRLconst x [c]) [d])
-       // cond: c==d
-       // result: (MOVWconst [0])
+}
+func rewriteValueARM_OpNot(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Not x)
+       // cond:
+       // result: (XORconst [1] x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRLconst {
-                       break
-               }
-               if x != v_1.Args[0] {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               if !(c == d) {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               v.reset(OpARMXORconst)
+               v.AuxInt = 1
+               v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMSUBshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SUBshiftRLreg (MOVWconst [c]) x y)
+       // match: (OffPtr [off] ptr:(SP))
        // cond:
-       // result: (RSBconst [c] (SRL <x.Type> x y))
+       // result: (MOVWaddr [off] ptr)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               off := v.AuxInt
+               ptr := v.Args[0]
+               if ptr.Op != OpSP {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMRSBconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpARMMOVWaddr)
+               v.AuxInt = off
+               v.AddArg(ptr)
+               return true
+       }
+       // match: (OffPtr [off] ptr)
+       // cond:
+       // result: (ADDconst [off] ptr)
+       for {
+               off := v.AuxInt
+               ptr := v.Args[0]
+               v.reset(OpARMADDconst)
+               v.AuxInt = off
+               v.AddArg(ptr)
                return true
        }
-       // match: (SUBshiftRLreg x y (MOVWconst [c]))
+}
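
The two OffPtr rules are tried in source order, so the SP-specific MOVWaddr form wins before the generic ADDconst fallback; this first-match dispatch is the same "for { ... break }" structure seen in every generated function. A sketch of the dispatch shape (illustrative only):

    // Sketch only: first matching rule wins, as in the generated code.
    func lowerOffPtr(ptrIsSP bool) string {
        if ptrIsSP {
            return "MOVWaddr" // specific rule, tried first
        }
        return "ADDconst" // generic fallback
    }
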
+func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Or16 x y)
        // cond:
-       // result: (SUBshiftRL x y [c])
+       // result: (OR x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               v.reset(OpARMSUBshiftRL)
+               v.reset(OpARMOR)
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       return false
 }
-func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
+func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SignExt16to32 x)
+       // match: (Or32 x y)
        // cond:
-       // result: (MOVHreg x)
+       // result: (OR x y)
        for {
                x := v.Args[0]
-               v.reset(OpARMMOVHreg)
+               y := v.Args[1]
+               v.reset(OpARMOR)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
+func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SignExt8to16 x)
+       // match: (Or8 x y)
        // cond:
-       // result: (MOVBreg x)
+       // result: (OR x y)
        for {
                x := v.Args[0]
-               v.reset(OpARMMOVBreg)
+               y := v.Args[1]
+               v.reset(OpARMOR)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
+func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SignExt8to32 x)
+       // match: (OrB x y)
        // cond:
-       // result: (MOVBreg x)
+       // result: (OR x y)
        for {
                x := v.Args[0]
-               v.reset(OpARMMOVBreg)
+               y := v.Args[1]
+               v.reset(OpARMOR)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Signmask x)
+       // match: (Rsh16Ux16 x y)
        // cond:
-       // result: (SRAconst x [31])
+       // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
        for {
                x := v.Args[0]
-               v.reset(OpARMSRAconst)
-               v.AddArg(x)
-               v.AuxInt = 31
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v.AuxInt = 0
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v3.AuxInt = 256
+               v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
                return true
        }
 }
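
The CMOVWHSconst pattern bounds variable shifts: ARM's register-specified shifts read only the bottom byte of the shift register, so CMPconst [256] plus the conditional move forces a zero result whenever the amount does not fit in that byte, while amounts 16..255 already produce zero from SRL on the zero-extended value. The net effect, sketched (illustrative name):

    // Sketch only: the observable result of Rsh16Ux16.
    func rsh16Ux16(x, y uint16) uint16 {
        if y >= 16 { // 16..255 zero out in hardware; >= 256 via CMOVWHSconst
            return 0
        }
        return x >> y
    }
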
-func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sqrt x)
+       // match: (Rsh16Ux32 x y)
        // cond:
-       // result: (SQRTD x)
+       // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
        for {
                x := v.Args[0]
-               v.reset(OpARMSQRTD)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v.AuxInt = 0
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v2.AddArg(y)
+               v.AddArg(v2)
                return true
        }
 }
-func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (StaticCall [argwid] {target} mem)
-       // cond:
-       // result: (CALLstatic [argwid] {target} mem)
+       // match: (Rsh16Ux64 x (Const64 [c]))
+       // cond: uint64(c) < 16
+       // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
        for {
-               argwid := v.AuxInt
-               target := v.Aux
-               mem := v.Args[0]
-               v.reset(OpARMCALLstatic)
-               v.AuxInt = argwid
-               v.Aux = target
-               v.AddArg(mem)
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 16) {
+                       break
+               }
+               v.reset(OpARMSRLconst)
+               v.AuxInt = c + 16
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               v0.AuxInt = 16
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (Rsh16Ux64 _ (Const64 [c]))
+       // cond: uint64(c) >= 16
+       // result: (Const16 [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 16) {
+                       break
+               }
+               v.reset(OpConst16)
+               v.AuxInt = 0
                return true
        }
+       return false
 }
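
For constant shifts of sub-word values the rules avoid a separate zero-extension: shifting left by 16 discards whatever occupies the high half of the register, and a single SRLconst by c+16 then performs the 16-bit shift. Sketch (illustrative name; x carries possible garbage above bit 15, as a sub-register value does):

    // Sketch only: Rsh16Ux64 with a constant shift c.
    func rsh16Ux64(x uint32, c uint) uint16 {
        if c >= 16 {
            return 0 // the Const16 [0] rule
        }
        w := x << 16                 // SLLconst [16]: clears the high-half garbage
        return uint16(w >> (c + 16)) // SRLconst [c+16]
    }
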
-func rewriteValueARM_OpStore(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Store [1] ptr val mem)
+       // match: (Rsh16Ux8  x y)
        // cond:
-       // result: (MOVBstore ptr val mem)
+       // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
        for {
-               if v.AuxInt != 1 {
-                       break
-               }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVBstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRL)
+               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (Store [2] ptr val mem)
+}
+func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x16 x y)
        // cond:
-       // result: (MOVHstore ptr val mem)
+       // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
        for {
-               if v.AuxInt != 2 {
-                       break
-               }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARMMOVHstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRAcond)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v.AddArg(v2)
                return true
        }
-       // match: (Store [4] ptr val mem)
-       // cond: !is32BitFloat(val.Type)
-       // result: (MOVWstore ptr val mem)
+}
+func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x32 x y)
+       // cond:
+       // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
        for {
-               if v.AuxInt != 4 {
-                       break
-               }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(!is32BitFloat(val.Type)) {
-                       break
-               }
-               v.reset(OpARMMOVWstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRAcond)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v.AddArg(y)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (Store [4] ptr val mem)
-       // cond: is32BitFloat(val.Type)
-       // result: (MOVFstore ptr val mem)
+}
+func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh16x64 x (Const64 [c]))
+       // cond: uint64(c) < 16
+       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
        for {
-               if v.AuxInt != 4 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is32BitFloat(val.Type)) {
+               c := v_1.AuxInt
+               if !(uint64(c) < 16) {
                        break
                }
-               v.reset(OpARMMOVFstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpARMSRAconst)
+               v.AuxInt = c + 16
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               v0.AuxInt = 16
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (Store [8] ptr val mem)
-       // cond: is64BitFloat(val.Type)
-       // result: (MOVDstore ptr val mem)
+       // match: (Rsh16x64 x (Const64 [c]))
+       // cond: uint64(c) >= 16
+       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
        for {
-               if v.AuxInt != 8 {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
                        break
                }
-               ptr := v.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is64BitFloat(val.Type)) {
+               c := v_1.AuxInt
+               if !(uint64(c) >= 16) {
                        break
                }
-               v.reset(OpARMMOVDstore)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpARMSRAconst)
+               v.AuxInt = 31
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               v0.AuxInt = 16
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
        return false
 }
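
The signed version repositions the 16-bit sign bit at bit 31 the same way; for c >= 16 it shifts right by 31, leaving all sign bits (0 or -1), the correct result for any signed shift of at least the operand width. Sketch (illustrative name):

    // Sketch only: Rsh16x64 with a constant shift c.
    func rsh16x64(x int16, c uint) int16 {
        w := int32(x) << 16 // SLLconst [16]: sign bit now at bit 31
        if c >= 16 {
            return int16(w >> 31) // SRAconst [31]: 0 or -1
        }
        return int16(w >> (c + 16)) // SRAconst [c+16]
    }
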
-func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub16 x y)
+       // match: (Rsh16x8  x y)
        // cond:
-       // result: (SUB x y)
+       // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSUB)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARMSRA)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub32 x y)
+       // match: (Rsh32Ux16 x y)
        // cond:
-       // result: (SUB x y)
+       // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSUB)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARMCMOVWHSconst)
+               v.AuxInt = 0
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v.AddArg(v2)
                return true
        }
 }
-func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub32F x y)
+       // match: (Rsh32Ux32 x y)
        // cond:
-       // result: (SUBF x y)
+       // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSUBF)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARMCMOVWHSconst)
+               v.AuxInt = 0
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub32carry x y)
-       // cond:
-       // result: (SUBS x y)
+       // match: (Rsh32Ux64 x (Const64 [c]))
+       // cond: uint64(c) < 32
+       // result: (SRLconst x [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSUBS)
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 32) {
+                       break
+               }
+               v.reset(OpARMSRLconst)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
+       // match: (Rsh32Ux64 _ (Const64 [c]))
+       // cond: uint64(c) >= 32
+       // result: (Const32 [0])
+       for {
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 32) {
+                       break
+               }
+               v.reset(OpConst32)
+               v.AuxInt = 0
+               return true
+       }
+       return false
 }
-func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub32withcarry x y c)
+       // match: (Rsh32Ux8  x y)
        // cond:
-       // result: (SBC x y c)
+       // result: (SRL x (ZeroExt8to32 y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               c := v.Args[2]
-               v.reset(OpARMSBC)
+               v.reset(OpARMSRL)
                v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(c)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub64F x y)
+       // match: (Rsh32x16 x y)
        // cond:
-       // result: (SUBD x y)
+       // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSUBD)
+               v.reset(OpARMSRAcond)
                v.AddArg(x)
-               v.AddArg(y)
+               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v1.AddArg(v2)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Sub8 x y)
+       // match: (Rsh32x32 x y)
        // cond:
-       // result: (SUB x y)
+       // result: (SRAcond x y (CMPconst [256] y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARMSUB)
+               v.reset(OpARMSRAcond)
                v.AddArg(x)
                v.AddArg(y)
+               v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v0.AuxInt = 256
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
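
SRAcond is the signed counterpart of the CMOVWHSconst pattern: amounts 32..255 saturate to a 31-bit arithmetic shift in hardware, and the CMPconst [256] flags let SRAcond do the same for anything larger, so oversized shifts fill with the sign bit. Sketch (illustrative name):

    // Sketch only: the observable result of Rsh32x32.
    func rsh32x32(x int32, y uint32) int32 {
        if y >= 32 { // 32..255 saturate in hardware; >= 256 via SRAcond's flags
            return x >> 31 // 0 or -1
        }
        return x >> y
    }
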
-func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (SubPtr x y)
-       // cond:
-       // result: (SUB x y)
+       // match: (Rsh32x64 x (Const64 [c]))
+       // cond: uint64(c) < 32
+       // result: (SRAconst x [c])
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARMSUB)
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) < 32) {
+                       break
+               }
+               v.reset(OpARMSRAconst)
+               v.AuxInt = c
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
-}
-func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Trunc16to8 x)
-       // cond:
-       // result: x
+       // match: (Rsh32x64 x (Const64 [c]))
+       // cond: uint64(c) >= 32
+       // result: (SRAconst x [31])
        for {
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
+               v_1 := v.Args[1]
+               if v_1.Op != OpConst64 {
+                       break
+               }
+               c := v_1.AuxInt
+               if !(uint64(c) >= 32) {
+                       break
+               }
+               v.reset(OpARMSRAconst)
+               v.AuxInt = 31
                v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Trunc32to16 x)
+       // match: (Rsh32x8  x y)
        // cond:
-       // result: x
+       // result: (SRA x (ZeroExt8to32 y))
        for {
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
+               y := v.Args[1]
+               v.reset(OpARMSRA)
                v.AddArg(x)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Trunc32to8 x)
+       // match: (Rsh8Ux16 x y)
        // cond:
-       // result: x
+       // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
        for {
                x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v.AuxInt = 0
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v3.AuxInt = 256
+               v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
                return true
        }
 }
-func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
+func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (XOR (MOVWconst [c]) x)
+       // match: (Rsh8Ux32 x y)
        // cond:
-       // result: (XORconst [c] x)
+       // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpARMXORconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMCMOVWHSconst)
+               v.AuxInt = 0
+               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v2.AddArg(y)
+               v.AddArg(v2)
                return true
        }
-       // match: (XOR x (MOVWconst [c]))
-       // cond:
-       // result: (XORconst [c] x)
+}
+func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux64 x (Const64 [c]))
+       // cond: uint64(c) < 8
+       // result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               v.reset(OpARMXORconst)
-               v.AuxInt = c
-               v.AddArg(x)
+               if !(uint64(c) < 8) {
+                       break
+               }
+               v.reset(OpARMSRLconst)
+               v.AuxInt = c + 24
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               v0.AuxInt = 24
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (XOR x (SLLconst [c] y))
-       // cond:
-       // result: (XORshiftLL x y [c])
+       // match: (Rsh8Ux64 _ (Const64 [c]))
+       // cond: uint64(c) >= 8
+       // result: (Const8 [0])
        for {
-               x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSLLconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               y := v_1.Args[0]
-               v.reset(OpARMXORshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               if !(uint64(c) >= 8) {
+                       break
+               }
+               v.reset(OpConst8)
+               v.AuxInt = 0
                return true
        }
-       // match: (XOR (SLLconst [c] y) x)
+       return false
+}
+func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8Ux8  x y)
        // cond:
-       // result: (XORshiftLL x y [c])
+       // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSLLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               v.reset(OpARMXORshiftLL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRL)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (XOR x (SRLconst [c] y))
+}
+func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x16 x y)
        // cond:
-       // result: (XORshiftRL x y [c])
+       // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRLconst {
-                       break
-               }
-               c := v_1.AuxInt
-               y := v_1.Args[0]
-               v.reset(OpARMXORshiftRL)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               y := v.Args[1]
+               v.reset(OpARMSRAcond)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v2.AuxInt = 256
+               v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v3.AddArg(y)
+               v2.AddArg(v3)
+               v.AddArg(v2)
                return true
        }
-       // match: (XOR (SRLconst [c] y) x)
+}
+func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x32 x y)
        // cond:
-       // result: (XORshiftRL x y [c])
+       // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRLconst {
-                       break
-               }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               v.reset(OpARMXORshiftRL)
-               v.AddArg(x)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRAcond)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
                v.AddArg(y)
-               v.AuxInt = c
+               v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
+               v1.AuxInt = 256
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (XOR x (SRAconst [c] y))
-       // cond:
-       // result: (XORshiftRA x y [c])
+}
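
The signed variable-shift rules above lean on SRAcond. Assuming SRAcond yields arg0>>arg1 when the CMPconst [256] flags report the shift amount is below 256, and arg0>>31 otherwise, a Go model of Rsh8x32 (names invented here) would be:

    // Sign-extend first so that oversized shifts fill with the sign bit,
    // matching Go's semantics for signed shifts past the operand width.
    func rsh8x32(x int8, y uint32) int8 {
            s := int32(x) // SignExt8to32
            if y >= 256 { // CMPconst [256] y
                    return int8(s >> 31)
            }
            return int8(s >> y) // for y in 8..255 the low 8 bits are already 0 or -1
    }
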
+func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x64 x (Const64 [c]))
+       // cond: uint64(c) < 8
+       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSRAconst {
+               if v_1.Op != OpConst64 {
                        break
                }
                c := v_1.AuxInt
-               y := v_1.Args[0]
-               v.reset(OpARMXORshiftRA)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
-               return true
-       }
-       // match: (XOR (SRAconst [c] y) x)
-       // cond:
-       // result: (XORshiftRA x y [c])
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRAconst {
+               if !(uint64(c) < 8) {
                        break
                }
-               c := v_0.AuxInt
-               y := v_0.Args[0]
-               x := v.Args[1]
-               v.reset(OpARMXORshiftRA)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
+               v.reset(OpARMSRAconst)
+               v.AuxInt = c + 24
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               v0.AuxInt = 24
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (XOR x (SLL y z))
-       // cond:
-       // result: (XORshiftLLreg x y z)
+       // match: (Rsh8x64 x (Const64 [c]))
+       // cond: uint64(c) >= 8
+       // result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
        for {
                x := v.Args[0]
                v_1 := v.Args[1]
-               if v_1.Op != OpARMSLL {
+               if v_1.Op != OpConst64 {
                        break
                }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               v.reset(OpARMXORshiftLLreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               c := v_1.AuxInt
+               if !(uint64(c) >= 8) {
+                       break
+               }
+               v.reset(OpARMSRAconst)
+               v.AuxInt = 31
+               v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
+               v0.AuxInt = 24
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (XOR (SLL y z) x)
+       return false
+}
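
When the constant shift is at least the operand width, the second rule above degenerates to broadcasting the sign bit: the value is parked in bits 24..31 and arithmetically shifted by 31. A one-line sketch (hypothetical name):

    // 0 if bit 7 of the 8-bit payload is clear, -1 if it is set,
    // which is Go's defined result for signed shifts by >= 8.
    func rsh8xBig(x uint32) int32 { return int32(x<<24) >> 31 }
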
+func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Rsh8x8  x y)
        // cond:
-       // result: (XORshiftLLreg x y z)
+       // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSLL {
-                       break
-               }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
-               x := v.Args[1]
-               v.reset(OpARMXORshiftLLreg)
-               v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSRA)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
-       // match: (XOR x (SRL y z))
+}
+func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SignExt16to32 x)
        // cond:
-       // result: (XORshiftRLreg x y z)
+       // result: (MOVHreg x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRL {
-                       break
-               }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               v.reset(OpARMXORshiftRLreg)
+               v.reset(OpARMMOVHreg)
                v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
                return true
        }
-       // match: (XOR (SRL y z) x)
+}
+func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SignExt8to16 x)
        // cond:
-       // result: (XORshiftRLreg x y z)
+       // result: (MOVBreg x)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRL {
-                       break
-               }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
-               x := v.Args[1]
-               v.reset(OpARMXORshiftRLreg)
+               x := v.Args[0]
+               v.reset(OpARMMOVBreg)
                v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
                return true
        }
-       // match: (XOR x (SRA y z))
+}
+func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SignExt8to32 x)
        // cond:
-       // result: (XORshiftRAreg x y z)
+       // result: (MOVBreg x)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRA {
-                       break
-               }
-               y := v_1.Args[0]
-               z := v_1.Args[1]
-               v.reset(OpARMXORshiftRAreg)
+               v.reset(OpARMMOVBreg)
                v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
                return true
        }
-       // match: (XOR (SRA y z) x)
+}
+func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Signmask x)
        // cond:
-       // result: (XORshiftRAreg x y z)
+       // result: (SRAconst x [31])
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMSRA {
-                       break
-               }
-               y := v_0.Args[0]
-               z := v_0.Args[1]
-               x := v.Args[1]
-               v.reset(OpARMXORshiftRAreg)
+               x := v.Args[0]
+               v.reset(OpARMSRAconst)
+               v.AuxInt = 31
                v.AddArg(x)
-               v.AddArg(y)
-               v.AddArg(z)
                return true
        }
-       // match: (XOR x x)
+}
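
Signmask compiles to a single instruction because an arithmetic right shift by 31 replicates the sign bit across the whole word; in Go terms:

    func signmask(x int32) int32 { return x >> 31 } // 0 for x >= 0, -1 for x < 0
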
+func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Sqrt x)
        // cond:
-       // result: (MOVWconst [0])
+       // result: (SQRTD x)
        for {
                x := v.Args[0]
-               if x != v.Args[1] {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               v.reset(OpARMSQRTD)
+               v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMXORconst(v *Value, config *Config) bool {
+func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (XORconst [0] x)
+       // match: (StaticCall [argwid] {target} mem)
        // cond:
-       // result: x
+       // result: (CALLstatic [argwid] {target} mem)
        for {
-               if v.AuxInt != 0 {
-                       break
-               }
-               x := v.Args[0]
-               v.reset(OpCopy)
-               v.Type = x.Type
-               v.AddArg(x)
+               argwid := v.AuxInt
+               target := v.Aux
+               mem := v.Args[0]
+               v.reset(OpARMCALLstatic)
+               v.AuxInt = argwid
+               v.Aux = target
+               v.AddArg(mem)
                return true
        }
-       // match: (XORconst [c] (MOVWconst [d]))
+}
+func rewriteValueARM_OpStore(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Store [1] ptr val mem)
        // cond:
-       // result: (MOVWconst [c^d])
+       // result: (MOVBstore ptr val mem)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v.AuxInt != 1 {
                        break
                }
-               d := v_0.AuxInt
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = c ^ d
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVBstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (XORconst [c] (XORconst [d] x))
+       // match: (Store [2] ptr val mem)
        // cond:
-       // result: (XORconst [c^d] x)
+       // result: (MOVHstore ptr val mem)
        for {
-               c := v.AuxInt
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMXORconst {
+               if v.AuxInt != 2 {
                        break
                }
-               d := v_0.AuxInt
-               x := v_0.Args[0]
-               v.reset(OpARMXORconst)
-               v.AuxInt = c ^ d
-               v.AddArg(x)
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARMMOVHstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (XORshiftLL (MOVWconst [c]) x [d])
-       // cond:
-       // result: (XORconst [c] (SLLconst <x.Type> x [d]))
+       // match: (Store [4] ptr val mem)
+       // cond: !is32BitFloat(val.Type)
+       // result: (MOVWstore ptr val mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v.AuxInt != 4 {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMXORconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
-               return true
-       }
-       // match: (XORshiftLL x (MOVWconst [c]) [d])
-       // cond:
-       // result: (XORconst x [int64(uint32(c)<<uint64(d))])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(!is32BitFloat(val.Type)) {
                        break
                }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMXORconst)
-               v.AddArg(x)
-               v.AuxInt = int64(uint32(c) << uint64(d))
+               v.reset(OpARMMOVWstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (XORshiftLL x (SLLconst x [c]) [d])
-       // cond: c==d
-       // result: (MOVWconst [0])
+       // match: (Store [4] ptr val mem)
+       // cond: is32BitFloat(val.Type)
+       // result: (MOVFstore ptr val mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSLLconst {
-                       break
-               }
-               if x != v_1.Args[0] {
+               if v.AuxInt != 4 {
                        break
                }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               if !(c == d) {
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is32BitFloat(val.Type)) {
                        break
                }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               v.reset(OpARMMOVFstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValueARM_OpARMXORshiftLLreg(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (XORshiftLLreg (MOVWconst [c]) x y)
-       // cond:
-       // result: (XORconst [c] (SLL <x.Type> x y))
+       // match: (Store [8] ptr val mem)
+       // cond: is64BitFloat(val.Type)
+       // result: (MOVDstore ptr val mem)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
+               if v.AuxInt != 8 {
                        break
                }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMXORconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               ptr := v.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is64BitFloat(val.Type)) {
+                       break
+               }
+               v.reset(OpARMMOVDstore)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-       // match: (XORshiftLLreg x y (MOVWconst [c]))
+       return false
+}
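
The Store rules above dispatch on the width carried in AuxInt plus the type class of the stored value. A rough Go restatement, collapsing is32BitFloat/is64BitFloat into one flag for brevity (storeOp is a hypothetical helper, not compiler code):

    func storeOp(width int64, isFloat bool) string {
            switch {
            case width == 1:
                    return "MOVBstore"
            case width == 2:
                    return "MOVHstore"
            case width == 4 && !isFloat:
                    return "MOVWstore"
            case width == 4 && isFloat:
                    return "MOVFstore"
            case width == 8 && isFloat:
                    return "MOVDstore"
            }
            return "" // 8-byte non-float stores are not matched here
    }
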
+func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Sub16 x y)
        // cond:
-       // result: (XORshiftLL x y [c])
+       // result: (SUB x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               v.reset(OpARMXORshiftLL)
+               v.reset(OpARMSUB)
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
+func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (XORshiftRA (MOVWconst [c]) x [d])
+       // match: (Sub32 x y)
        // cond:
-       // result: (XORconst [c] (SRAconst <x.Type> x [d]))
+       // result: (SUB x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMXORconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSUB)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (XORshiftRA x (MOVWconst [c]) [d])
+}
+func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Sub32F x y)
        // cond:
-       // result: (XORconst x [int64(int32(c)>>uint64(d))])
+       // result: (SUBF x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMXORconst)
+               y := v.Args[1]
+               v.reset(OpARMSUBF)
                v.AddArg(x)
-               v.AuxInt = int64(int32(c) >> uint64(d))
+               v.AddArg(y)
                return true
        }
-       // match: (XORshiftRA x (SRAconst x [c]) [d])
-       // cond: c==d
-       // result: (MOVWconst [0])
+}
+func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Sub32carry x y)
+       // cond:
+       // result: (SUBS x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRAconst {
-                       break
-               }
-               if x != v_1.Args[0] {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               if !(c == d) {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               y := v.Args[1]
+               v.reset(OpARMSUBS)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMXORshiftRAreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (XORshiftRAreg (MOVWconst [c]) x y)
+       // match: (Sub32withcarry x y c)
        // cond:
-       // result: (XORconst [c] (SRA <x.Type> x y))
+       // result: (SBC x y c)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMXORconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.Args[0]
+               y := v.Args[1]
+               c := v.Args[2]
+               v.reset(OpARMSBC)
+               v.AddArg(x)
+               v.AddArg(y)
+               v.AddArg(c)
                return true
        }
-       // match: (XORshiftRAreg x y (MOVWconst [c]))
+}
+func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Sub64F x y)
        // cond:
-       // result: (XORshiftRA x y [c])
+       // result: (SUBD x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               v.reset(OpARMXORshiftRA)
+               v.reset(OpARMSUBD)
                v.AddArg(x)
                v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
+func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (XORshiftRL (MOVWconst [c]) x [d])
+       // match: (Sub8 x y)
        // cond:
-       // result: (XORconst [c] (SRLconst <x.Type> x [d]))
+       // result: (SUB x y)
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               d := v.AuxInt
-               v.reset(OpARMXORconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-               v0.AddArg(x)
-               v0.AuxInt = d
-               v.AddArg(v0)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARMSUB)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
-       // match: (XORshiftRL x (MOVWconst [c]) [d])
+}
+func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (SubPtr x y)
        // cond:
-       // result: (XORconst x [int64(uint32(c)>>uint64(d))])
+       // result: (SUB x y)
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               v.reset(OpARMXORconst)
+               y := v.Args[1]
+               v.reset(OpARMSUB)
                v.AddArg(x)
-               v.AuxInt = int64(uint32(c) >> uint64(d))
+               v.AddArg(y)
                return true
        }
-       // match: (XORshiftRL x (SRLconst x [c]) [d])
-       // cond: c==d
-       // result: (MOVWconst [0])
+}
+func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Trunc16to8 x)
+       // cond:
+       // result: x
        for {
                x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARMSRLconst {
-                       break
-               }
-               if x != v_1.Args[0] {
-                       break
-               }
-               c := v_1.AuxInt
-               d := v.AuxInt
-               if !(c == d) {
-                       break
-               }
-               v.reset(OpARMMOVWconst)
-               v.AuxInt = 0
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueARM_OpARMXORshiftRLreg(v *Value, config *Config) bool {
+func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (XORshiftRLreg (MOVWconst [c]) x y)
+       // match: (Trunc32to16 x)
        // cond:
-       // result: (XORconst [c] (SRL <x.Type> x y))
+       // result: x
        for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               y := v.Args[2]
-               v.reset(OpARMXORconst)
-               v.AuxInt = c
-               v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               x := v.Args[0]
+               v.reset(OpCopy)
+               v.Type = x.Type
+               v.AddArg(x)
                return true
        }
-       // match: (XORshiftRLreg x y (MOVWconst [c]))
+}
+func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Trunc32to8 x)
        // cond:
-       // result: (XORshiftRL x y [c])
+       // result: x
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v_2 := v.Args[2]
-               if v_2.Op != OpARMMOVWconst {
-                       break
-               }
-               c := v_2.AuxInt
-               v.reset(OpARMXORshiftRL)
+               v.reset(OpCopy)
+               v.Type = x.Type
                v.AddArg(x)
-               v.AddArg(y)
-               v.AuxInt = c
                return true
        }
-       return false
 }
 func rewriteValueARM_OpXor16(v *Value, config *Config) bool {
        b := v.Block
@@ -16756,8 +16756,8 @@ func rewriteValueARM_OpZero(v *Value, config *Config) bool {
                v.AuxInt = SizeAndAlign(s).Align()
                v.AddArg(ptr)
                v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type)
-               v0.AddArg(ptr)
                v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+               v0.AddArg(ptr)
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
                v1.AuxInt = 0
@@ -16815,12 +16815,12 @@ func rewriteValueARM_OpZeromask(v *Value, config *Config) bool {
        for {
                x := v.Args[0]
                v.reset(OpARMSRAconst)
+               v.AuxInt = 31
                v0 := b.NewValue0(v.Line, OpARMRSBshiftRL, config.fe.TypeInt32())
+               v0.AuxInt = 1
                v0.AddArg(x)
                v0.AddArg(x)
-               v0.AuxInt = 1
                v.AddArg(v0)
-               v.AuxInt = 31
                return true
        }
 }
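
Zeromask turns nonzero into all-ones in two instructions. Assuming RSBshiftRL x y [c] computes (y>>c) - x, the idiom is that x>>1 - x borrows (setting bit 31) exactly when x != 0, and the arithmetic shift by 31 smears that bit into a mask:

    func zeromask(x uint32) uint32 {
            return uint32(int32(x>>1-x) >> 31) // 0 if x == 0, ^uint32(0) otherwise
    }

The hunks that follow are from rewriteARM64.go.
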
index ecde744d0fac8d742ce9d184b2363607ad20c5d7..e268f5907943eabd56a7f03905fe44dd8898811f 100644 (file)
@@ -10,6 +10,36 @@ func rewriteValueARM64(v *Value, config *Config) bool {
        switch v.Op {
        case OpARM64ADDconst:
                return rewriteValueARM64_OpARM64ADDconst(v, config)
+       case OpARM64FMOVDload:
+               return rewriteValueARM64_OpARM64FMOVDload(v, config)
+       case OpARM64FMOVDstore:
+               return rewriteValueARM64_OpARM64FMOVDstore(v, config)
+       case OpARM64FMOVSload:
+               return rewriteValueARM64_OpARM64FMOVSload(v, config)
+       case OpARM64FMOVSstore:
+               return rewriteValueARM64_OpARM64FMOVSstore(v, config)
+       case OpARM64MOVBUload:
+               return rewriteValueARM64_OpARM64MOVBUload(v, config)
+       case OpARM64MOVBload:
+               return rewriteValueARM64_OpARM64MOVBload(v, config)
+       case OpARM64MOVBstore:
+               return rewriteValueARM64_OpARM64MOVBstore(v, config)
+       case OpARM64MOVDload:
+               return rewriteValueARM64_OpARM64MOVDload(v, config)
+       case OpARM64MOVDstore:
+               return rewriteValueARM64_OpARM64MOVDstore(v, config)
+       case OpARM64MOVHUload:
+               return rewriteValueARM64_OpARM64MOVHUload(v, config)
+       case OpARM64MOVHload:
+               return rewriteValueARM64_OpARM64MOVHload(v, config)
+       case OpARM64MOVHstore:
+               return rewriteValueARM64_OpARM64MOVHstore(v, config)
+       case OpARM64MOVWUload:
+               return rewriteValueARM64_OpARM64MOVWUload(v, config)
+       case OpARM64MOVWload:
+               return rewriteValueARM64_OpARM64MOVWload(v, config)
+       case OpARM64MOVWstore:
+               return rewriteValueARM64_OpARM64MOVWstore(v, config)
        case OpAdd16:
                return rewriteValueARM64_OpAdd16(v, config)
        case OpAdd32:
@@ -132,14 +162,6 @@ func rewriteValueARM64(v *Value, config *Config) bool {
                return rewriteValueARM64_OpEqB(v, config)
        case OpEqPtr:
                return rewriteValueARM64_OpEqPtr(v, config)
-       case OpARM64FMOVDload:
-               return rewriteValueARM64_OpARM64FMOVDload(v, config)
-       case OpARM64FMOVDstore:
-               return rewriteValueARM64_OpARM64FMOVDstore(v, config)
-       case OpARM64FMOVSload:
-               return rewriteValueARM64_OpARM64FMOVSload(v, config)
-       case OpARM64FMOVSstore:
-               return rewriteValueARM64_OpARM64FMOVSstore(v, config)
        case OpGeq16:
                return rewriteValueARM64_OpGeq16(v, config)
        case OpGeq16U:
@@ -290,28 +312,6 @@ func rewriteValueARM64(v *Value, config *Config) bool {
                return rewriteValueARM64_OpLsh8x64(v, config)
        case OpLsh8x8:
                return rewriteValueARM64_OpLsh8x8(v, config)
-       case OpARM64MOVBUload:
-               return rewriteValueARM64_OpARM64MOVBUload(v, config)
-       case OpARM64MOVBload:
-               return rewriteValueARM64_OpARM64MOVBload(v, config)
-       case OpARM64MOVBstore:
-               return rewriteValueARM64_OpARM64MOVBstore(v, config)
-       case OpARM64MOVDload:
-               return rewriteValueARM64_OpARM64MOVDload(v, config)
-       case OpARM64MOVDstore:
-               return rewriteValueARM64_OpARM64MOVDstore(v, config)
-       case OpARM64MOVHUload:
-               return rewriteValueARM64_OpARM64MOVHUload(v, config)
-       case OpARM64MOVHload:
-               return rewriteValueARM64_OpARM64MOVHload(v, config)
-       case OpARM64MOVHstore:
-               return rewriteValueARM64_OpARM64MOVHstore(v, config)
-       case OpARM64MOVWUload:
-               return rewriteValueARM64_OpARM64MOVWUload(v, config)
-       case OpARM64MOVWload:
-               return rewriteValueARM64_OpARM64MOVWload(v, config)
-       case OpARM64MOVWstore:
-               return rewriteValueARM64_OpARM64MOVWstore(v, config)
        case OpMod16:
                return rewriteValueARM64_OpMod16(v, config)
        case OpMod16u:
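
These case moves in the ARM64 dispatch switch line up with a case-sensitive sort of the arch-qualified op names: the upper-case "ARM64..." block sorts ahead of "Add16" because 'R' < 'd' in ASCII. A quick check:

    package main

    import (
            "fmt"
            "sort"
    )

    func main() {
            ops := []string{"Add16", "ARM64MOVBUload", "EqPtr", "ARM64ADDconst"}
            sort.Strings(ops)
            fmt.Println(ops) // [ARM64ADDconst ARM64MOVBUload Add16 EqPtr]
    }
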
@@ -542,1380 +542,1555 @@ func rewriteValueARM64_OpARM64ADDconst(v *Value, config *Config) bool {
        }
        return false
 }
-func rewriteValueARM64_OpAdd16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add16 x y)
+       // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (ADD x y)
+       // result: (FMOVDload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64ADD)
-               v.AddArg(x)
-               v.AddArg(y)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64FMOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpAdd32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add32 x y)
-       // cond:
-       // result: (ADD x y)
+       // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64ADD)
-               v.AddArg(x)
-               v.AddArg(y)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64FMOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
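
The two FMOVDload rules above (and their siblings below) fold addressing arithmetic into the load's displacement. Plain pointer arithmetic shows why this is sound, ignoring overflow of the offset field (loadAt is a stand-in for illustration, not compiler code):

    // *(ptr + off2) at displacement off1  ==  *ptr at displacement off1+off2
    func loadAt(mem []byte, base, disp int64) byte {
            return mem[base+disp]
    }
    // loadAt(m, base+off2, off1) == loadAt(m, base, off1+off2)
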
-func rewriteValueARM64_OpAdd32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add32F x y)
+       // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (FADDS x y)
+       // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64FADDS)
-               v.AddArg(x)
-               v.AddArg(y)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64FMOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpAdd64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add64 x y)
-       // cond:
-       // result: (ADD x y)
+       // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64ADD)
-               v.AddArg(x)
-               v.AddArg(y)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64FMOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpAdd64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Add64F x y)
+       // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (FADDD x y)
+       // result: (FMOVSload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64FADDD)
-               v.AddArg(x)
-               v.AddArg(y)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64FMOVSload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpAdd8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Add8 x y)
-       // cond:
-       // result: (ADD x y)
+       // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64ADD)
-               v.AddArg(x)
-               v.AddArg(y)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64FMOVSload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpAddPtr(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (AddPtr x y)
+       // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (ADD x y)
-       for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64ADD)
-               v.AddArg(x)
-               v.AddArg(y)
-               return true
-       }
-}
-func rewriteValueARM64_OpAddr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Addr {sym} base)
-       // cond:
-       // result: (MOVDaddr {sym} base)
+       // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
        for {
+               off1 := v.AuxInt
                sym := v.Aux
-               base := v.Args[0]
-               v.reset(OpARM64MOVDaddr)
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64FMOVSstore)
+               v.AuxInt = off1 + off2
                v.Aux = sym
-               v.AddArg(base)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpAnd16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And16 x y)
-       // cond:
-       // result: (AND x y)
+       // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64AND)
-               v.AddArg(x)
-               v.AddArg(y)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64FMOVSstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpAnd32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVBUload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (And32 x y)
+       // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (AND x y)
+       // result: (MOVBUload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64AND)
-               v.AddArg(x)
-               v.AddArg(y)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVBUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpAnd64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (And64 x y)
-       // cond:
-       // result: (AND x y)
+       // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64AND)
-               v.AddArg(x)
-               v.AddArg(y)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVBUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpAnd8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVBload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (And8 x y)
+       // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (AND x y)
+       // result: (MOVBload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64AND)
-               v.AddArg(x)
-               v.AddArg(y)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpAndB(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (AndB x y)
-       // cond:
-       // result: (AND x y)
+       // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64AND)
-               v.AddArg(x)
-               v.AddArg(y)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVBload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpAvg64u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVBstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Avg64u <t> x y)
+       // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (ADD (ADD <t> (SRLconst <t> x [1]) (SRLconst <t> y [1])) (AND <t> (AND <t> x y) (MOVDconst [1])))
+       // result: (MOVBstore [off1+off2] {sym} ptr val mem)
        for {
-               t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64ADD)
-               v0 := b.NewValue0(v.Line, OpARM64ADD, t)
-               v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
-               v1.AddArg(x)
-               v1.AuxInt = 1
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpARM64SRLconst, t)
-               v2.AddArg(y)
-               v2.AuxInt = 1
-               v0.AddArg(v2)
-               v.AddArg(v0)
-               v3 := b.NewValue0(v.Line, OpARM64AND, t)
-               v4 := b.NewValue0(v.Line, OpARM64AND, t)
-               v4.AddArg(x)
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v5 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v5.AuxInt = 1
-               v3.AddArg(v5)
-               v.AddArg(v3)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpClosureCall(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ClosureCall [argwid] entry closure mem)
+       // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (CALLclosure [argwid] entry closure mem)
+       // result: (MOVDload [off1+off2] {sym} ptr mem)
        for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               closure := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64CALLclosure)
-               v.AuxInt = argwid
-               v.AddArg(entry)
-               v.AddArg(closure)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
                v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpCom16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com16 x)
-       // cond:
-       // result: (MVN x)
+       // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
-               v.reset(OpARM64MVN)
-               v.AddArg(x)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVDload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpCom32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Com32 x)
+       // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (MVN x)
+       // result: (MOVDstore [off1+off2] {sym} ptr val mem)
        for {
-               x := v.Args[0]
-               v.reset(OpARM64MVN)
-               v.AddArg(x)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpCom64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Com64 x)
-       // cond:
-       // result: (MVN x)
+       // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
        for {
-               x := v.Args[0]
-               v.reset(OpARM64MVN)
-               v.AddArg(x)
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpCom8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Com8 x)
+       // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (MVN x)
+       // result: (MOVHUload [off1+off2] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               v.reset(OpARM64MVN)
-               v.AddArg(x)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVHUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpConst16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const16 [val])
-       // cond:
-       // result: (MOVDconst [val])
+       // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               val := v.AuxInt
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVHUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpConst32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Const32 [val])
+       // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (MOVDconst [val])
+       // result: (MOVHload [off1+off2] {sym} ptr mem)
        for {
-               val := v.AuxInt
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVHload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpConst32F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const32F [val])
-       // cond:
-       // result: (FMOVSconst [val])
+       // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               val := v.AuxInt
-               v.reset(OpARM64FMOVSconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVHload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpConst64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Const64 [val])
+       // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (MOVDconst [val])
+       // result: (MOVHstore [off1+off2] {sym} ptr val mem)
        for {
-               val := v.AuxInt
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpConst64F(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Const64F [val])
-       // cond:
-       // result: (FMOVDconst [val])
+       // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
        for {
-               val := v.AuxInt
-               v.reset(OpARM64FMOVDconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVHstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpConst8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Const8 [val])
+       // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (MOVDconst [val])
+       // result: (MOVWUload [off1+off2] {sym} ptr mem)
        for {
-               val := v.AuxInt
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = val
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVWUload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpConstBool(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ConstBool [b])
-       // cond:
-       // result: (MOVDconst [b])
+       // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               b := v.AuxInt
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = b
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVWUload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpConstNil(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (ConstNil)
+       // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
        // cond:
-       // result: (MOVDconst [0])
+       // result: (MOVWload [off1+off2] {sym} ptr mem)
        for {
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = 0
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64MOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpConvert(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Convert x mem)
-       // cond:
-       // result: (MOVDconvert x mem)
+       // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
        for {
-               x := v.Args[0]
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
                mem := v.Args[1]
-               v.reset(OpARM64MOVDconvert)
-               v.AddArg(x)
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVWload)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
                v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpCvt32Fto32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32Fto32 x)
+       // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
        // cond:
-       // result: (FCVTZSSW x)
+       // result: (MOVWstore [off1+off2] {sym} ptr val mem)
        for {
-               x := v.Args[0]
-               v.reset(OpARM64FCVTZSSW)
-               v.AddArg(x)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64MOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+       // cond: canMergeSym(sym1,sym2)
+       // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       for {
+               off1 := v.AuxInt
+               sym1 := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpARM64MOVDaddr {
+                       break
+               }
+               off2 := v_0.AuxInt
+               sym2 := v_0.Aux
+               ptr := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(canMergeSym(sym1, sym2)) {
+                       break
+               }
+               v.reset(OpARM64MOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = mergeSym(sym1, sym2)
+               v.AddArg(ptr)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValueARM64_OpCvt32Fto32U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAdd16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32Fto32U x)
+       // match: (Add16 x y)
        // cond:
-       // result: (FCVTZUSW x)
+       // result: (ADD x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64FCVTZUSW)
+               y := v.Args[1]
+               v.reset(OpARM64ADD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpCvt32Fto64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAdd32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32Fto64 x)
+       // match: (Add32 x y)
        // cond:
-       // result: (FCVTZSS x)
+       // result: (ADD x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64FCVTZSS)
+               y := v.Args[1]
+               v.reset(OpARM64ADD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpCvt32Fto64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAdd32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32Fto64F x)
+       // match: (Add32F x y)
        // cond:
-       // result: (FCVTSD x)
+       // result: (FADDS x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64FCVTSD)
+               y := v.Args[1]
+               v.reset(OpARM64FADDS)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpCvt32Uto32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAdd64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32Uto32F x)
+       // match: (Add64 x y)
        // cond:
-       // result: (UCVTFWS x)
+       // result: (ADD x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64UCVTFWS)
+               y := v.Args[1]
+               v.reset(OpARM64ADD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpCvt32Uto64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAdd64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32Uto64F x)
+       // match: (Add64F x y)
        // cond:
-       // result: (UCVTFWD x)
+       // result: (FADDD x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64UCVTFWD)
+               y := v.Args[1]
+               v.reset(OpARM64FADDD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpCvt32to32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAdd8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32to32F x)
+       // match: (Add8 x y)
        // cond:
-       // result: (SCVTFWS x)
+       // result: (ADD x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64SCVTFWS)
+               y := v.Args[1]
+               v.reset(OpARM64ADD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpCvt32to64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAddPtr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt32to64F x)
+       // match: (AddPtr x y)
        // cond:
-       // result: (SCVTFWD x)
+       // result: (ADD x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64SCVTFWD)
+               y := v.Args[1]
+               v.reset(OpARM64ADD)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpCvt64Fto32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAddr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt64Fto32 x)
+       // match: (Addr {sym} base)
        // cond:
-       // result: (FCVTZSDW x)
+       // result: (MOVDaddr {sym} base)
+       for {
+               sym := v.Aux
+               base := v.Args[0]
+               v.reset(OpARM64MOVDaddr)
+               v.Aux = sym
+               v.AddArg(base)
+               return true
+       }
+}
+func rewriteValueARM64_OpAnd16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (And16 x y)
+       // cond:
+       // result: (AND x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64FCVTZSDW)
+               y := v.Args[1]
+               v.reset(OpARM64AND)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpCvt64Fto32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAnd32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt64Fto32F x)
+       // match: (And32 x y)
        // cond:
-       // result: (FCVTDS x)
+       // result: (AND x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64FCVTDS)
+               y := v.Args[1]
+               v.reset(OpARM64AND)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpCvt64Fto32U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAnd64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt64Fto32U x)
+       // match: (And64 x y)
        // cond:
-       // result: (FCVTZUDW x)
+       // result: (AND x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64FCVTZUDW)
+               y := v.Args[1]
+               v.reset(OpARM64AND)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpCvt64Fto64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAnd8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt64Fto64 x)
+       // match: (And8 x y)
        // cond:
-       // result: (FCVTZSD x)
+       // result: (AND x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64FCVTZSD)
+               y := v.Args[1]
+               v.reset(OpARM64AND)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpCvt64to32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAndB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt64to32F x)
+       // match: (AndB x y)
        // cond:
-       // result: (SCVTFS x)
+       // result: (AND x y)
        for {
                x := v.Args[0]
-               v.reset(OpARM64SCVTFS)
+               y := v.Args[1]
+               v.reset(OpARM64AND)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpCvt64to64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpAvg64u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Cvt64to64F x)
+       // match: (Avg64u <t> x y)
        // cond:
-       // result: (SCVTFD x)
+       // result: (ADD (ADD <t> (SRLconst <t> x [1]) (SRLconst <t> y [1])) (AND <t> (AND <t> x y) (MOVDconst [1])))
        for {
+               t := v.Type
                x := v.Args[0]
-               v.reset(OpARM64SCVTFD)
-               v.AddArg(x)
+               y := v.Args[1]
+               v.reset(OpARM64ADD)
+               v0 := b.NewValue0(v.Line, OpARM64ADD, t)
+               v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+               v1.AuxInt = 1
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+               v2.AuxInt = 1
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               v3 := b.NewValue0(v.Line, OpARM64AND, t)
+               v4 := b.NewValue0(v.Line, OpARM64AND, t)
+               v4.AddArg(x)
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v5 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v5.AuxInt = 1
+               v3.AddArg(v5)
+               v.AddArg(v3)
                return true
        }
 }
-func rewriteValueARM64_OpDeferCall(v *Value, config *Config) bool {
+func rewriteValueARM64_OpClosureCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (DeferCall [argwid] mem)
+       // match: (ClosureCall [argwid] entry closure mem)
        // cond:
-       // result: (CALLdefer [argwid] mem)
+       // result: (CALLclosure [argwid] entry closure mem)
        for {
                argwid := v.AuxInt
-               mem := v.Args[0]
-               v.reset(OpARM64CALLdefer)
+               entry := v.Args[0]
+               closure := v.Args[1]
+               mem := v.Args[2]
+               v.reset(OpARM64CALLclosure)
                v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(closure)
                v.AddArg(mem)
                return true
        }
 }
-func rewriteValueARM64_OpDiv16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCom16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div16 x y)
+       // match: (Com16 x)
        // cond:
-       // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+       // result: (MVN x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64DIVW)
-               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v.reset(OpARM64MVN)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpDiv16u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCom32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div16u x y)
+       // match: (Com32 x)
        // cond:
-       // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+       // result: (MVN x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UDIVW)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v.reset(OpARM64MVN)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpDiv32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCom64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div32 x y)
+       // match: (Com64 x)
        // cond:
-       // result: (DIVW x y)
+       // result: (MVN x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64DIVW)
+               v.reset(OpARM64MVN)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpDiv32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCom8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div32F x y)
+       // match: (Com8 x)
        // cond:
-       // result: (FDIVS x y)
+       // result: (MVN x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64FDIVS)
+               v.reset(OpARM64MVN)
                v.AddArg(x)
-               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpDiv32u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConst16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div32u x y)
+       // match: (Const16 [val])
        // cond:
-       // result: (UDIVW x y)
+       // result: (MOVDconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UDIVW)
-               v.AddArg(x)
-               v.AddArg(y)
+               val := v.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM64_OpDiv64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConst32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div64 x y)
+       // match: (Const32 [val])
        // cond:
-       // result: (DIV x y)
+       // result: (MOVDconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64DIV)
-               v.AddArg(x)
-               v.AddArg(y)
+               val := v.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM64_OpDiv64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConst32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div64F x y)
+       // match: (Const32F [val])
        // cond:
-       // result: (FDIVD x y)
+       // result: (FMOVSconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64FDIVD)
-               v.AddArg(x)
-               v.AddArg(y)
+               val := v.AuxInt
+               v.reset(OpARM64FMOVSconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM64_OpDiv64u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConst64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div64u x y)
+       // match: (Const64 [val])
        // cond:
-       // result: (UDIV x y)
+       // result: (MOVDconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UDIV)
-               v.AddArg(x)
-               v.AddArg(y)
+               val := v.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM64_OpDiv8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConst64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div8 x y)
+       // match: (Const64F [val])
        // cond:
-       // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+       // result: (FMOVDconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64DIVW)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               val := v.AuxInt
+               v.reset(OpARM64FMOVDconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM64_OpDiv8u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConst8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Div8u x y)
+       // match: (Const8 [val])
        // cond:
-       // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+       // result: (MOVDconst [val])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64UDIVW)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               val := v.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = val
                return true
        }
 }
-func rewriteValueARM64_OpEq16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConstBool(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq16 x y)
+       // match: (ConstBool [b])
        // cond:
-       // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (MOVDconst [b])
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               b := v.AuxInt
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = b
                return true
        }
 }
-func rewriteValueARM64_OpEq32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpConstNil(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq32 x y)
+       // match: (ConstNil)
        // cond:
-       // result: (Equal (CMPW x y))
+       // result: (MOVDconst [0])
+       for {
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
+               return true
+       }
+}
+func rewriteValueARM64_OpConvert(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Convert x mem)
+       // cond:
+       // result: (MOVDconvert x mem)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               mem := v.Args[1]
+               v.reset(OpARM64MOVDconvert)
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
 }
-func rewriteValueARM64_OpEq32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt32Fto32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq32F x y)
+       // match: (Cvt32Fto32 x)
        // cond:
-       // result: (Equal (FCMPS x y))
+       // result: (FCVTZSSW x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpARM64FCVTZSSW)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpEq64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt32Fto32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq64 x y)
+       // match: (Cvt32Fto32U x)
        // cond:
-       // result: (Equal (CMP x y))
+       // result: (FCVTZUSW x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpARM64FCVTZUSW)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpEq64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt32Fto64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq64F x y)
+       // match: (Cvt32Fto64 x)
        // cond:
-       // result: (Equal (FCMPD x y))
+       // result: (FCVTZSS x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpARM64FCVTZSS)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpEq8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt32Fto64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Eq8 x y)
+       // match: (Cvt32Fto64F x)
        // cond:
-       // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (FCVTSD x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v.reset(OpARM64FCVTSD)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpEqB(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt32Uto32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (EqB x y)
+       // match: (Cvt32Uto32F x)
        // cond:
-       // result: (XOR (MOVDconst [1]) (XOR <config.fe.TypeBool()> x y))
+       // result: (UCVTFWS x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64XOR)
-               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-               v0.AuxInt = 1
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64XOR, config.fe.TypeBool())
-               v1.AddArg(x)
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v.reset(OpARM64UCVTFWS)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpEqPtr(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt32Uto64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (EqPtr x y)
+       // match: (Cvt32Uto64F x)
        // cond:
-       // result: (Equal (CMP x y))
+       // result: (UCVTFWD x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64Equal)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpARM64UCVTFWD)
+               v.AddArg(x)
                return true
        }
 }
-func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt32to32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (Cvt32to32F x)
        // cond:
-       // result: (FMOVDload [off1+off2] {sym} ptr mem)
+       // result: (SCVTFWS x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64FMOVDload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpARM64SCVTFWS)
+               v.AddArg(x)
                return true
        }
-       // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+}
+func rewriteValueARM64_OpCvt32to64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt32to64F x)
+       // cond:
+       // result: (SCVTFWD x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARM64FMOVDload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpARM64SCVTFWD)
+               v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt64Fto32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (Cvt64Fto32 x)
        // cond:
-       // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
+       // result: (FCVTZSDW x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64FMOVDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpARM64FCVTZSDW)
+               v.AddArg(x)
                return true
        }
-       // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+}
+func rewriteValueARM64_OpCvt64Fto32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt64Fto32F x)
+       // cond:
+       // result: (FCVTDS x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARM64FMOVDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpARM64FCVTDS)
+               v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt64Fto32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (Cvt64Fto32U x)
        // cond:
-       // result: (FMOVSload [off1+off2] {sym} ptr mem)
+       // result: (FCVTZUDW x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64FMOVSload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpARM64FCVTZUDW)
+               v.AddArg(x)
                return true
        }
-       // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+}
+func rewriteValueARM64_OpCvt64Fto64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt64Fto64 x)
+       // cond:
+       // result: (FCVTZSD x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARM64FMOVSload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpARM64FCVTZSD)
+               v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
+func rewriteValueARM64_OpCvt64to32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (Cvt64to32F x)
        // cond:
-       // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
+       // result: (SCVTFS x)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64FMOVSstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpARM64SCVTFS)
+               v.AddArg(x)
                return true
        }
-       // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+}
+func rewriteValueARM64_OpCvt64to64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Cvt64to64F x)
+       // cond:
+       // result: (SCVTFD x)
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARM64FMOVSstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               x := v.Args[0]
+               v.reset(OpARM64SCVTFD)
+               v.AddArg(x)
                return true
        }
-       return false
 }
-func rewriteValueARM64_OpGeq16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDeferCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq16 x y)
+       // match: (DeferCall [argwid] mem)
        // cond:
-       // result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (CALLdefer [argwid] mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               argwid := v.AuxInt
+               mem := v.Args[0]
+               v.reset(OpARM64CALLdefer)
+               v.AuxInt = argwid
+               v.AddArg(mem)
                return true
        }
 }
-func rewriteValueARM64_OpGeq16U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq16U x y)
+       // match: (Div16 x y)
        // cond:
-       // result: (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
+               v.reset(OpARM64DIVW)
+               v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v0.AddArg(x)
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM64_OpGeq32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv16u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq32 x y)
+       // match: (Div16u x y)
        // cond:
-       // result: (GreaterEqual (CMPW x y))
+       // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v.reset(OpARM64UDIVW)
+               v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v0.AddArg(x)
-               v0.AddArg(y)
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM64_OpGeq32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq32F x y)
+       // match: (Div32 x y)
        // cond:
-       // result: (GreaterEqual (FCMPS x y))
+       // result: (DIVW x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpARM64DIVW)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpGeq32U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq32U x y)
+       // match: (Div32F x y)
        // cond:
-       // result: (GreaterEqualU (CMPW x y))
+       // result: (FDIVS x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpARM64FDIVS)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpGeq64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv32u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq64 x y)
+       // match: (Div32u x y)
        // cond:
-       // result: (GreaterEqual (CMP x y))
+       // result: (UDIVW x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpARM64UDIVW)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpGeq64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq64F x y)
+       // match: (Div64 x y)
        // cond:
-       // result: (GreaterEqual (FCMPD x y))
+       // result: (DIV x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpARM64DIV)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpGeq64U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq64U x y)
+       // match: (Div64F x y)
        // cond:
-       // result: (GreaterEqualU (CMP x y))
+       // result: (FDIVD x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpARM64FDIVD)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpGeq8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv64u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq8 x y)
+       // match: (Div64u x y)
        // cond:
-       // result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+       // result: (UDIV x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v.reset(OpARM64UDIV)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpGeq8U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Geq8U x y)
+       // match: (Div8 x y)
        // cond:
-       // result: (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
+               v.reset(OpARM64DIVW)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM64_OpGetClosurePtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (GetClosurePtr)
-       // cond:
-       // result: (LoweredGetClosurePtr)
-       for {
-               v.reset(OpARM64LoweredGetClosurePtr)
-               return true
-       }
-}
-func rewriteValueARM64_OpGoCall(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (GoCall [argwid] mem)
-       // cond:
-       // result: (CALLgo [argwid] mem)
-       for {
-               argwid := v.AuxInt
-               mem := v.Args[0]
-               v.reset(OpARM64CALLgo)
-               v.AuxInt = argwid
-               v.AddArg(mem)
-               return true
-       }
-}
-func rewriteValueARM64_OpGreater16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpDiv8u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater16 x y)
+       // match: (Div8u x y)
        // cond:
-       // result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
+               v.reset(OpARM64UDIVW)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v0.AddArg(x)
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM64_OpGreater16U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater16U x y)
+       // match: (Eq16 x y)
        // cond:
-       // result: (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterThanU)
+               v.reset(OpARM64Equal)
                v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
                v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v1.AddArg(x)
@@ -1927,16 +2102,16 @@ func rewriteValueARM64_OpGreater16U(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpGreater32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater32 x y)
+       // match: (Eq32 x y)
        // cond:
-       // result: (GreaterThan (CMPW x y))
+       // result: (Equal (CMPW x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
+               v.reset(OpARM64Equal)
                v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
@@ -1944,16 +2119,16 @@ func rewriteValueARM64_OpGreater32(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpGreater32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater32F x y)
+       // match: (Eq32F x y)
        // cond:
-       // result: (GreaterThan (FCMPS x y))
+       // result: (Equal (FCMPS x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
+               v.reset(OpARM64Equal)
                v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
@@ -1961,347 +2136,320 @@ func rewriteValueARM64_OpGreater32F(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpGreater32U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater32U x y)
+       // match: (Eq64 x y)
        // cond:
-       // result: (GreaterThanU (CMPW x y))
+       // result: (Equal (CMP x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v.reset(OpARM64Equal)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpGreater64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater64 x y)
+       // match: (Eq64F x y)
        // cond:
-       // result: (GreaterThan (CMP x y))
+       // result: (Equal (FCMPD x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v.reset(OpARM64Equal)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpGreater64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEq8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater64F x y)
+       // match: (Eq8 x y)
        // cond:
-       // result: (GreaterThan (FCMPD x y))
+       // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               v.reset(OpARM64Equal)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpGreater64U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEqB(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater64U x y)
+       // match: (EqB x y)
        // cond:
-       // result: (GreaterThanU (CMP x y))
+       // result: (XOR (MOVDconst [1]) (XOR <config.fe.TypeBool()> x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               v.reset(OpARM64XOR)
+               v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
+               v0.AuxInt = 1
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64XOR, config.fe.TypeBool())
+               v1.AddArg(x)
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM64_OpGreater8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpEqPtr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater8 x y)
+       // match: (EqPtr x y)
        // cond:
-       // result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+       // result: (Equal (CMP x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
+               v.reset(OpARM64Equal)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpGreater8U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Greater8U x y)
+       // match: (Geq16 x y)
        // cond:
-       // result: (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterThanU)
+               v.reset(OpARM64GreaterEqual)
                v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
                v1.AddArg(x)
                v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
                v2.AddArg(y)
                v0.AddArg(v2)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpHmul16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq16U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul16 x y)
+       // match: (Geq16U x y)
        // cond:
-       // result: (SRAconst (MULW <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
+       // result: (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRAconst)
-               v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt32())
-               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v.reset(OpARM64GreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v1.AddArg(x)
                v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v2.AddArg(y)
                v0.AddArg(v2)
                v.AddArg(v0)
-               v.AuxInt = 16
                return true
        }
 }
-func rewriteValueARM64_OpHmul16u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul16u x y)
+       // match: (Geq32 x y)
        // cond:
-       // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
+       // result: (GreaterEqual (CMPW x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRLconst)
-               v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt32())
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AuxInt = 16
                return true
        }
 }
-func rewriteValueARM64_OpHmul32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul32 x y)
+       // match: (Geq32F x y)
        // cond:
-       // result: (SRAconst (MULL <config.fe.TypeInt64()> x y) [32])
+       // result: (GreaterEqual (FCMPS x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRAconst)
-               v0 := b.NewValue0(v.Line, OpARM64MULL, config.fe.TypeInt64())
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v.AuxInt = 32
                return true
        }
 }
-func rewriteValueARM64_OpHmul32u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul32u x y)
+       // match: (Geq32U x y)
        // cond:
-       // result: (SRAconst (UMULL <config.fe.TypeUInt64()> x y) [32])
+       // result: (GreaterEqualU (CMPW x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRAconst)
-               v0 := b.NewValue0(v.Line, OpARM64UMULL, config.fe.TypeUInt64())
+               v.reset(OpARM64GreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
-               v.AuxInt = 32
                return true
        }
 }
-func rewriteValueARM64_OpHmul64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul64 x y)
+       // match: (Geq64 x y)
        // cond:
-       // result: (MULH x y)
+       // result: (GreaterEqual (CMP x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64MULH)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpHmul64u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul64u x y)
+       // match: (Geq64F x y)
        // cond:
-       // result: (UMULH x y)
+       // result: (GreaterEqual (FCMPD x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64UMULH)
-               v.AddArg(x)
-               v.AddArg(y)
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpHmul8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq64U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul8 x y)
+       // match: (Geq64U x y)
        // cond:
-       // result: (SRAconst (MULW <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
+       // result: (GreaterEqualU (CMP x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRAconst)
-               v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt16())
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
+               v.reset(OpARM64GreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v.AuxInt = 8
                return true
        }
 }
-func rewriteValueARM64_OpHmul8u(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Hmul8u x y)
+       // match: (Geq8 x y)
        // cond:
-       // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
+       // result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64SRLconst)
-               v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt16())
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
                v1.AddArg(x)
                v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
                v2.AddArg(y)
                v0.AddArg(v2)
                v.AddArg(v0)
-               v.AuxInt = 8
-               return true
-       }
-}
-func rewriteValueARM64_OpInterCall(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (InterCall [argwid] entry mem)
-       // cond:
-       // result: (CALLinter [argwid] entry mem)
-       for {
-               argwid := v.AuxInt
-               entry := v.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64CALLinter)
-               v.AuxInt = argwid
-               v.AddArg(entry)
-               v.AddArg(mem)
                return true
        }
 }
-func rewriteValueARM64_OpIsInBounds(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGeq8U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (IsInBounds idx len)
+       // match: (Geq8U x y)
        // cond:
-       // result: (LessThanU (CMP idx len))
+       // result: (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
-               idx := v.Args[0]
-               len := v.Args[1]
-               v.reset(OpARM64LessThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(idx)
-               v0.AddArg(len)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpIsNonNil(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGetClosurePtr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (IsNonNil ptr)
+       // match: (GetClosurePtr)
        // cond:
-       // result: (NotEqual (CMPconst [0] ptr))
+       // result: (LoweredGetClosurePtr)
        for {
-               ptr := v.Args[0]
-               v.reset(OpARM64NotEqual)
-               v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v0.AuxInt = 0
-               v0.AddArg(ptr)
-               v.AddArg(v0)
+               v.reset(OpARM64LoweredGetClosurePtr)
                return true
        }
 }
-func rewriteValueARM64_OpIsSliceInBounds(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGoCall(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (IsSliceInBounds idx len)
+       // match: (GoCall [argwid] mem)
        // cond:
-       // result: (LessEqualU (CMP idx len))
+       // result: (CALLgo [argwid] mem)
        for {
-               idx := v.Args[0]
-               len := v.Args[1]
-               v.reset(OpARM64LessEqualU)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(idx)
-               v0.AddArg(len)
-               v.AddArg(v0)
+               argwid := v.AuxInt
+               mem := v.Args[0]
+               v.reset(OpARM64CALLgo)
+               v.AuxInt = argwid
+               v.AddArg(mem)
                return true
        }
 }
-func rewriteValueARM64_OpLeq16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq16 x y)
+       // match: (Greater16 x y)
        // cond:
-       // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessEqual)
+               v.reset(OpARM64GreaterThan)
                v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
                v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
                v1.AddArg(x)
@@ -2313,16 +2461,16 @@ func rewriteValueARM64_OpLeq16(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLeq16U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater16U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq16U x y)
+       // match: (Greater16U x y)
        // cond:
-       // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessEqualU)
+               v.reset(OpARM64GreaterThanU)
                v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
                v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v1.AddArg(x)
@@ -2334,16 +2482,16 @@ func rewriteValueARM64_OpLeq16U(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLeq32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq32 x y)
+       // match: (Greater32 x y)
        // cond:
-       // result: (LessEqual (CMPW x y))
+       // result: (GreaterThan (CMPW x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessEqual)
+               v.reset(OpARM64GreaterThan)
                v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
@@ -2351,33 +2499,33 @@ func rewriteValueARM64_OpLeq32(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLeq32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq32F x y)
+       // match: (Greater32F x y)
        // cond:
-       // result: (GreaterEqual (FCMPS y x))
+       // result: (GreaterThan (FCMPS x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
+               v.reset(OpARM64GreaterThan)
                v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-               v0.AddArg(y)
                v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpLeq32U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq32U x y)
+       // match: (Greater32U x y)
        // cond:
-       // result: (LessEqualU (CMPW x y))
+       // result: (GreaterThanU (CMPW x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessEqualU)
+               v.reset(OpARM64GreaterThanU)
                v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
@@ -2385,16 +2533,16 @@ func rewriteValueARM64_OpLeq32U(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLeq64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq64 x y)
+       // match: (Greater64 x y)
        // cond:
-       // result: (LessEqual (CMP x y))
+       // result: (GreaterThan (CMP x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessEqual)
+               v.reset(OpARM64GreaterThan)
                v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
@@ -2402,33 +2550,33 @@ func rewriteValueARM64_OpLeq64(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLeq64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq64F x y)
+       // match: (Greater64F x y)
        // cond:
-       // result: (GreaterEqual (FCMPD y x))
+       // result: (GreaterThan (FCMPD x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterEqual)
+               v.reset(OpARM64GreaterThan)
                v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-               v0.AddArg(y)
                v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpLeq64U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater64U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq64U x y)
+       // match: (Greater64U x y)
        // cond:
-       // result: (LessEqualU (CMP x y))
+       // result: (GreaterThanU (CMP x y))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessEqualU)
+               v.reset(OpARM64GreaterThanU)
                v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
                v0.AddArg(x)
                v0.AddArg(y)
@@ -2436,16 +2584,16 @@ func rewriteValueARM64_OpLeq64U(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLeq8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq8 x y)
+       // match: (Greater8 x y)
        // cond:
-       // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+       // result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessEqual)
+               v.reset(OpARM64GreaterThan)
                v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
                v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
                v1.AddArg(x)
@@ -2457,16 +2605,16 @@ func rewriteValueARM64_OpLeq8(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLeq8U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpGreater8U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Leq8U x y)
+       // match: (Greater8U x y)
        // cond:
-       // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessEqualU)
+               v.reset(OpARM64GreaterThanU)
                v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
                v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
                v1.AddArg(x)
@@ -2478,17 +2626,18 @@ func rewriteValueARM64_OpLeq8U(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLess16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less16 x y)
+       // match: (Hmul16 x y)
        // cond:
-       // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+       // result: (SRAconst (MULW <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = 16
+               v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt32())
                v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
                v1.AddArg(x)
                v0.AddArg(v1)
@@ -2499,17 +2648,18 @@ func rewriteValueARM64_OpLess16(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLess16U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul16u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less16U x y)
+       // match: (Hmul16u x y)
        // cond:
-       // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v.reset(OpARM64SRLconst)
+               v.AuxInt = 16
+               v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt32())
                v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v1.AddArg(x)
                v0.AddArg(v1)
@@ -2520,691 +2670,804 @@ func rewriteValueARM64_OpLess16U(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLess32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less32 x y)
+       // match: (Hmul32 x y)
        // cond:
-       // result: (LessThan (CMPW x y))
+       // result: (SRAconst (MULL <config.fe.TypeInt64()> x y) [32])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = 32
+               v0 := b.NewValue0(v.Line, OpARM64MULL, config.fe.TypeInt64())
                v0.AddArg(x)
                v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpLess32F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul32u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less32F x y)
+       // match: (Hmul32u x y)
        // cond:
-       // result: (GreaterThan (FCMPS y x))
+       // result: (SRAconst (UMULL <config.fe.TypeUInt64()> x y) [32])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-               v0.AddArg(y)
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = 32
+               v0 := b.NewValue0(v.Line, OpARM64UMULL, config.fe.TypeUInt64())
                v0.AddArg(x)
+               v0.AddArg(y)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpLess32U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less32U x y)
+       // match: (Hmul64 x y)
        // cond:
-       // result: (LessThanU (CMPW x y))
+       // result: (MULH x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessThanU)
-               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v.reset(OpARM64MULH)
+               v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValueARM64_OpLess64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul64u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less64 x y)
+       // match: (Hmul64u x y)
        // cond:
-       // result: (LessThan (CMP x y))
+       // result: (UMULH x y)
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessThan)
-               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               return true
+               v.reset(OpARM64UMULH)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
        }
 }
-func rewriteValueARM64_OpLess64F(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less64F x y)
+       // match: (Hmul8 x y)
        // cond:
-       // result: (GreaterThan (FCMPD y x))
+       // result: (SRAconst (MULW <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64GreaterThan)
-               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-               v0.AddArg(y)
-               v0.AddArg(x)
+               v.reset(OpARM64SRAconst)
+               v.AuxInt = 8
+               v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt16())
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpLess64U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpHmul8u(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less64U x y)
+       // match: (Hmul8u x y)
        // cond:
-       // result: (LessThanU (CMP x y))
+       // result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
        for {
                x := v.Args[0]
                y := v.Args[1]
+               v.reset(OpARM64SRLconst)
+               v.AuxInt = 8
+               v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt16())
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpInterCall(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (InterCall [argwid] entry mem)
+       // cond:
+       // result: (CALLinter [argwid] entry mem)
+       for {
+               argwid := v.AuxInt
+               entry := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpARM64CALLinter)
+               v.AuxInt = argwid
+               v.AddArg(entry)
+               v.AddArg(mem)
+               return true
+       }
+}
+func rewriteValueARM64_OpIsInBounds(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (IsInBounds idx len)
+       // cond:
+       // result: (LessThanU (CMP idx len))
+       for {
+               idx := v.Args[0]
+               len := v.Args[1]
                v.reset(OpARM64LessThanU)
                v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
+               v0.AddArg(idx)
+               v0.AddArg(len)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpLess8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpIsNonNil(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less8 x y)
+       // match: (IsNonNil ptr)
        // cond:
-       // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+       // result: (NotEqual (CMPconst [0] ptr))
+       for {
+               ptr := v.Args[0]
+               v.reset(OpARM64NotEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v0.AuxInt = 0
+               v0.AddArg(ptr)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpIsSliceInBounds(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (IsSliceInBounds idx len)
+       // cond:
+       // result: (LessEqualU (CMP idx len))
+       for {
+               idx := v.Args[0]
+               len := v.Args[1]
+               v.reset(OpARM64LessEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(idx)
+               v0.AddArg(len)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValueARM64_OpLeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq16 x y)
+       // cond:
+       // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessThan)
+               v.reset(OpARM64LessEqual)
                v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
                v1.AddArg(x)
                v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
                v2.AddArg(y)
                v0.AddArg(v2)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpLess8U(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLeq16U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Less8U x y)
+       // match: (Leq16U x y)
        // cond:
-       // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64LessThanU)
+               v.reset(OpARM64LessEqualU)
                v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v1.AddArg(x)
                v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
                v2.AddArg(y)
                v0.AddArg(v2)
                v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpLoad(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLeq32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Load <t> ptr mem)
-       // cond: t.IsBoolean()
-       // result: (MOVBUload ptr mem)
-       for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(t.IsBoolean()) {
-                       break
-               }
-               v.reset(OpARM64MOVBUload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Load <t> ptr mem)
-       // cond: (is8BitInt(t) && isSigned(t))
-       // result: (MOVBload ptr mem)
+       // match: (Leq32 x y)
+       // cond:
+       // result: (LessEqual (CMPW x y))
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is8BitInt(t) && isSigned(t)) {
-                       break
-               }
-               v.reset(OpARM64MOVBload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is8BitInt(t) && !isSigned(t))
-       // result: (MOVBUload ptr mem)
+}
+func rewriteValueARM64_OpLeq32F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq32F x y)
+       // cond:
+       // result: (GreaterEqual (FCMPS y x))
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is8BitInt(t) && !isSigned(t)) {
-                       break
-               }
-               v.reset(OpARM64MOVBUload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is16BitInt(t) && isSigned(t))
-       // result: (MOVHload ptr mem)
+}
+func rewriteValueARM64_OpLeq32U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq32U x y)
+       // cond:
+       // result: (LessEqualU (CMPW x y))
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is16BitInt(t) && isSigned(t)) {
-                       break
-               }
-               v.reset(OpARM64MOVHload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is16BitInt(t) && !isSigned(t))
-       // result: (MOVHUload ptr mem)
+}
+func rewriteValueARM64_OpLeq64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq64 x y)
+       // cond:
+       // result: (LessEqual (CMP x y))
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is16BitInt(t) && !isSigned(t)) {
-                       break
-               }
-               v.reset(OpARM64MOVHUload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is32BitInt(t) && isSigned(t))
-       // result: (MOVWload ptr mem)
+}
+func rewriteValueARM64_OpLeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq64F x y)
+       // cond:
+       // result: (GreaterEqual (FCMPD y x))
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitInt(t) && isSigned(t)) {
-                       break
-               }
-               v.reset(OpARM64MOVWload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Load <t> ptr mem)
-       // cond: (is32BitInt(t) && !isSigned(t))
-       // result: (MOVWUload ptr mem)
-       for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitInt(t) && !isSigned(t)) {
-                       break
-               }
-               v.reset(OpARM64MOVWUload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64GreaterEqual)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
+               v0.AddArg(y)
+               v0.AddArg(x)
+               v.AddArg(v0)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: (is64BitInt(t) || isPtr(t))
-       // result: (MOVDload ptr mem)
+}
+func rewriteValueARM64_OpLeq64U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq64U x y)
+       // cond:
+       // result: (LessEqualU (CMP x y))
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is64BitInt(t) || isPtr(t)) {
-                       break
-               }
-               v.reset(OpARM64MOVDload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is32BitFloat(t)
-       // result: (FMOVSload ptr mem)
+}
+func rewriteValueARM64_OpLeq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq8 x y)
+       // cond:
+       // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is32BitFloat(t)) {
-                       break
-               }
-               v.reset(OpARM64FMOVSload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqual)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       // match: (Load <t> ptr mem)
-       // cond: is64BitFloat(t)
-       // result: (FMOVDload ptr mem)
+}
+func rewriteValueARM64_OpLeq8U(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Leq8U x y)
+       // cond:
+       // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
-               t := v.Type
-               ptr := v.Args[0]
-               mem := v.Args[1]
-               if !(is64BitFloat(t)) {
-                       break
-               }
-               v.reset(OpARM64FMOVDload)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64LessEqualU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
-       return false
 }
-func rewriteValueARM64_OpLrot16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lrot16 <t> x [c])
+       // match: (Less16 x y)
        // cond:
-       // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> (ZeroExt16to64 x) [16-c&15]))
+       // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
        for {
-               t := v.Type
                x := v.Args[0]
-               c := v.AuxInt
-               v.reset(OpARM64OR)
-               v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
-               v0.AddArg(x)
-               v0.AuxInt = c & 15
+               y := v.Args[1]
+               v.reset(OpARM64LessThan)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v2.AddArg(x)
-               v1.AddArg(v2)
-               v1.AuxInt = 16 - c&15
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM64_OpLrot32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess16U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lrot32 x [c])
+       // match: (Less16U x y)
        // cond:
-       // result: (RORWconst x [32-c&31])
+       // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
        for {
                x := v.Args[0]
-               c := v.AuxInt
-               v.reset(OpARM64RORWconst)
-               v.AddArg(x)
-               v.AuxInt = 32 - c&31
+               y := v.Args[1]
+               v.reset(OpARM64LessThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpLrot64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lrot64 x [c])
+       // match: (Less32 x y)
        // cond:
-       // result: (RORconst  x [64-c&63])
+       // result: (LessThan (CMPW x y))
        for {
                x := v.Args[0]
-               c := v.AuxInt
-               v.reset(OpARM64RORconst)
-               v.AddArg(x)
-               v.AuxInt = 64 - c&63
+               y := v.Args[1]
+               v.reset(OpARM64LessThan)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
                return true
        }
 }
-func rewriteValueARM64_OpLrot8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess32F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lrot8  <t> x [c])
+       // match: (Less32F x y)
        // cond:
-       // result: (OR (SLLconst <t> x [c&7])  (SRLconst <t> (ZeroExt8to64  x) [8-c&7]))
+       // result: (GreaterThan (FCMPS y x))
        for {
-               t := v.Type
                x := v.Args[0]
-               c := v.AuxInt
-               v.reset(OpARM64OR)
-               v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
+               y := v.Args[1]
+               v.reset(OpARM64GreaterThan)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
+               v0.AddArg(y)
                v0.AddArg(x)
-               v0.AuxInt = c & 7
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v2.AddArg(x)
-               v1.AddArg(v2)
-               v1.AuxInt = 8 - c&7
-               v.AddArg(v1)
                return true
        }
 }
-func rewriteValueARM64_OpLsh16x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess32U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x16 <t> x y)
+       // match: (Less32U x y)
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       // result: (LessThanU (CMPW x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v.reset(OpARM64LessThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
                v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
                return true
        }
 }
-func rewriteValueARM64_OpLsh16x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x32 <t> x y)
+       // match: (Less64 x y)
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       // result: (LessThan (CMP x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v.reset(OpARM64LessThan)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
                v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
                return true
        }
 }
-func rewriteValueARM64_OpLsh16x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess64F(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x64  x (MOVDconst [c]))
-       // cond: uint64(c) < 16
-       // result: (SLLconst x [c])
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 16) {
-                       break
-               }
-               v.reset(OpARM64SLLconst)
-               v.AddArg(x)
-               v.AuxInt = c
-               return true
-       }
-       // match: (Lsh16x64  _ (MOVDconst [c]))
-       // cond: uint64(c) >= 16
-       // result: (MOVDconst [0])
-       for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 16) {
-                       break
-               }
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = 0
-               return true
-       }
-       // match: (Lsh16x64 <t> x y)
+       // match: (Less64F x y)
        // cond:
-       // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+       // result: (GreaterThan (FCMPD y x))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
+               v.reset(OpARM64GreaterThan)
+               v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
                v0.AddArg(y)
+               v0.AddArg(x)
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpConst64, t)
-               v1.AuxInt = 0
-               v.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v2.AuxInt = 64
-               v2.AddArg(y)
-               v.AddArg(v2)
                return true
        }
 }
-func rewriteValueARM64_OpLsh16x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess64U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh16x8  <t> x y)
+       // match: (Less64U x y)
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       // result: (LessThanU (CMP x y))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v.reset(OpARM64LessThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
                v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
+               v0.AddArg(y)
                v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
                return true
        }
 }
-func rewriteValueARM64_OpLsh32x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh32x16 <t> x y)
+       // match: (Less8 x y)
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
+               v.reset(OpARM64LessThan)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(x)
                v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
                return true
        }
 }
-func rewriteValueARM64_OpLsh32x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLess8U(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh32x32 <t> x y)
+       // match: (Less8U x y)
        // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+       // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
        for {
-               t := v.Type
                x := v.Args[0]
                y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
+               v.reset(OpARM64LessThanU)
+               v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
                v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
                v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
                return true
        }
 }
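
The two rewrites above share one idea: ARM64 compares 8-bit values by widening them to 32 bits first, so Less8 sign-extends both operands while Less8U zero-extends them, and the boolean is materialized from the CMPW flags by the LessThan/LessThanU pseudo-ops. A minimal semantics sketch in plain Go (function names are illustrative, not part of this CL):

    func less8(x, y int8) bool   { return int32(x) < int32(y) }   // LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y))
    func less8U(x, y uint8) bool { return uint32(x) < uint32(y) } // LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))
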
-func rewriteValueARM64_OpLsh32x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLoad(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh32x64  x (MOVDconst [c]))
-       // cond: uint64(c) < 32
-       // result: (SLLconst x [c])
+       // match: (Load <t> ptr mem)
+       // cond: t.IsBoolean()
+       // result: (MOVBUload ptr mem)
        for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) < 32) {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(t.IsBoolean()) {
                        break
                }
-               v.reset(OpARM64SLLconst)
-               v.AddArg(x)
-               v.AuxInt = c
+               v.reset(OpARM64MOVBUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (Lsh32x64  _ (MOVDconst [c]))
-       // cond: uint64(c) >= 32
-       // result: (MOVDconst [0])
+       // match: (Load <t> ptr mem)
+       // cond: (is8BitInt(t) && isSigned(t))
+       // result: (MOVBload ptr mem)
        for {
-               v_1 := v.Args[1]
-               if v_1.Op != OpARM64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               if !(uint64(c) >= 32) {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is8BitInt(t) && isSigned(t)) {
                        break
                }
-               v.reset(OpARM64MOVDconst)
-               v.AuxInt = 0
+               v.reset(OpARM64MOVBload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-       // match: (Lsh32x64 <t> x y)
-       // cond:
-       // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+       // match: (Load <t> ptr mem)
+       // cond: (is8BitInt(t) && !isSigned(t))
+       // result: (MOVBUload ptr mem)
        for {
                t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpConst64, t)
-               v1.AuxInt = 0
-               v.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v2.AuxInt = 64
-               v2.AddArg(y)
-               v.AddArg(v2)
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is8BitInt(t) && !isSigned(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVBUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpLsh32x8(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh32x8  <t> x y)
-       // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
+       // match: (Load <t> ptr mem)
+       // cond: (is16BitInt(t) && isSigned(t))
+       // result: (MOVHload ptr mem)
        for {
                t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
-               v.AddArg(v0)
-               v2 := b.NewValue0(v.Line, OpConst64, t)
-               v2.AuxInt = 0
-               v.AddArg(v2)
-               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-               v3.AuxInt = 64
-               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v4.AddArg(y)
-               v3.AddArg(v4)
-               v.AddArg(v3)
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is16BitInt(t) && isSigned(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVHload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValueARM64_OpLsh64x16(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Lsh64x16 <t> x y)
-       // cond:
-       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       // match: (Load <t> ptr mem)
+       // cond: (is16BitInt(t) && !isSigned(t))
+       // result: (MOVHUload ptr mem)
        for {
                t := v.Type
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpARM64CSELULT)
-               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-               v0.AddArg(x)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v0.AddArg(v1)
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is16BitInt(t) && !isSigned(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVHUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is32BitInt(t) && isSigned(t))
+       // result: (MOVWload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitInt(t) && isSigned(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVWload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is32BitInt(t) && !isSigned(t))
+       // result: (MOVWUload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitInt(t) && !isSigned(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVWUload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: (is64BitInt(t) || isPtr(t))
+       // result: (MOVDload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is64BitInt(t) || isPtr(t)) {
+                       break
+               }
+               v.reset(OpARM64MOVDload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: is32BitFloat(t)
+       // result: (FMOVSload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is32BitFloat(t)) {
+                       break
+               }
+               v.reset(OpARM64FMOVSload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (Load <t> ptr mem)
+       // cond: is64BitFloat(t)
+       // result: (FMOVDload ptr mem)
+       for {
+               t := v.Type
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               if !(is64BitFloat(t)) {
+                       break
+               }
+               v.reset(OpARM64FMOVDload)
+               v.AddArg(ptr)
+               v.AddArg(mem)
+               return true
+       }
+       return false
+}
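
The Load rules dispatch purely on the result type: width picks the base opcode, signedness picks between sign- and zero-extending variants, pointers ride the 64-bit integer path, and floats go to FMOVSload/FMOVDload. The signed/unsigned split exists because a sub-word load must fill the whole register one way or the other; illustrative Go (helper names assumed):

    func loadInt8(p *int8) int64   { return int64(*p) } // needs MOVBload: sign-extends the byte
    func loadUint8(p *uint8) int64 { return int64(*p) } // needs MOVBUload: zero-extends the byte
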
+func rewriteValueARM64_OpLrot16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot16 <t> x [c])
+       // cond:
+       // result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> (ZeroExt16to64 x) [16-c&15]))
+       for {
+               t := v.Type
+               c := v.AuxInt
+               x := v.Args[0]
+               v.reset(OpARM64OR)
+               v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
+               v0.AuxInt = c & 15
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+               v1.AuxInt = 16 - c&15
+               v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValueARM64_OpLrot32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot32 x [c])
+       // cond:
+       // result: (RORWconst x [32-c&31])
+       for {
+               c := v.AuxInt
+               x := v.Args[0]
+               v.reset(OpARM64RORWconst)
+               v.AuxInt = 32 - c&31
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpLrot64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot64 x [c])
+       // cond:
+       // result: (RORconst  x [64-c&63])
+       for {
+               c := v.AuxInt
+               x := v.Args[0]
+               v.reset(OpARM64RORconst)
+               v.AuxInt = 64 - c&63
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValueARM64_OpLrot8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lrot8  <t> x [c])
+       // cond:
+       // result: (OR (SLLconst <t> x [c&7])  (SRLconst <t> (ZeroExt8to64  x) [8-c&7]))
+       for {
+               t := v.Type
+               c := v.AuxInt
+               x := v.Args[0]
+               v.reset(OpARM64OR)
+               v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
+               v0.AuxInt = c & 7
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
+               v1.AuxInt = 8 - c&7
+               v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v2.AddArg(x)
+               v1.AddArg(v2)
+               v.AddArg(v1)
+               return true
+       }
+}
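
The four Lrot rules diverge because ARM64 has a native rotate-right but no rotate-left and no sub-word rotates: Lrot32 and Lrot64 become a single RORWconst/RORconst using the identity rotl(x, c) == rotr(x, width-c), while Lrot16 and Lrot8 are synthesized from a shift pair OR'd together, zero-extending first so the right shift brings in zeros. A minimal sketch of the 8-bit case (helper name assumed, not compiler code):

    func lrot8(x uint8, c uint) uint8 {
            // OR (SLLconst x [c&7]) (SRLconst (ZeroExt8to64 x) [8-c&7])
            return x<<(c&7) | x>>(8-c&7)
    }
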
+func rewriteValueARM64_OpLsh16x16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh16x16 <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+       for {
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
                v.AddArg(v0)
                v2 := b.NewValue0(v.Line, OpConst64, t)
                v2.AuxInt = 0
@@ -3218,10 +3481,10 @@ func rewriteValueARM64_OpLsh64x16(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLsh64x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh16x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh64x32 <t> x y)
+       // match: (Lsh16x32 <t> x y)
        // cond:
        // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
        for {
@@ -3247,11 +3510,11 @@ func rewriteValueARM64_OpLsh64x32(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLsh64x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh16x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh64x64  x (MOVDconst [c]))
-       // cond: uint64(c) < 64
+       // match: (Lsh16x64  x (MOVDconst [c]))
+       // cond: uint64(c) < 16
        // result: (SLLconst x [c])
        for {
                x := v.Args[0]
@@ -3260,16 +3523,16 @@ func rewriteValueARM64_OpLsh64x64(v *Value, config *Config) bool {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) < 64) {
+               if !(uint64(c) < 16) {
                        break
                }
                v.reset(OpARM64SLLconst)
-               v.AddArg(x)
                v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (Lsh64x64  _ (MOVDconst [c]))
-       // cond: uint64(c) >= 64
+       // match: (Lsh16x64  _ (MOVDconst [c]))
+       // cond: uint64(c) >= 16
        // result: (MOVDconst [0])
        for {
                v_1 := v.Args[1]
@@ -3277,14 +3540,14 @@ func rewriteValueARM64_OpLsh64x64(v *Value, config *Config) bool {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) >= 64) {
+               if !(uint64(c) >= 16) {
                        break
                }
                v.reset(OpARM64MOVDconst)
                v.AuxInt = 0
                return true
        }
-       // match: (Lsh64x64 <t> x y)
+       // match: (Lsh16x64 <t> x y)
        // cond:
        // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
        for {
@@ -3306,10 +3569,10 @@ func rewriteValueARM64_OpLsh64x64(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLsh64x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh16x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh64x8  <t> x y)
+       // match: (Lsh16x8  <t> x y)
        // cond:
        // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
        for {
@@ -3335,10 +3598,10 @@ func rewriteValueARM64_OpLsh64x8(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLsh8x16(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh32x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x16 <t> x y)
+       // match: (Lsh32x16 <t> x y)
        // cond:
        // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
        for {
@@ -3364,10 +3627,10 @@ func rewriteValueARM64_OpLsh8x16(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLsh8x32(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh32x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x32 <t> x y)
+       // match: (Lsh32x32 <t> x y)
        // cond:
        // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
        for {
@@ -3393,11 +3656,11 @@ func rewriteValueARM64_OpLsh8x32(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLsh8x64(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh32x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x64   x (MOVDconst [c]))
-       // cond: uint64(c) < 8
+       // match: (Lsh32x64  x (MOVDconst [c]))
+       // cond: uint64(c) < 32
        // result: (SLLconst x [c])
        for {
                x := v.Args[0]
@@ -3406,16 +3669,16 @@ func rewriteValueARM64_OpLsh8x64(v *Value, config *Config) bool {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) < 8) {
+               if !(uint64(c) < 32) {
                        break
                }
                v.reset(OpARM64SLLconst)
-               v.AddArg(x)
                v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (Lsh8x64   _ (MOVDconst [c]))
-       // cond: uint64(c) >= 8
+       // match: (Lsh32x64  _ (MOVDconst [c]))
+       // cond: uint64(c) >= 32
        // result: (MOVDconst [0])
        for {
                v_1 := v.Args[1]
@@ -3423,14 +3686,14 @@ func rewriteValueARM64_OpLsh8x64(v *Value, config *Config) bool {
                        break
                }
                c := v_1.AuxInt
-               if !(uint64(c) >= 8) {
+               if !(uint64(c) >= 32) {
                        break
                }
                v.reset(OpARM64MOVDconst)
                v.AuxInt = 0
                return true
        }
-       // match: (Lsh8x64 <t> x y)
+       // match: (Lsh32x64 <t> x y)
        // cond:
        // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
        for {
@@ -3452,10 +3715,10 @@ func rewriteValueARM64_OpLsh8x64(v *Value, config *Config) bool {
                return true
        }
 }
-func rewriteValueARM64_OpLsh8x8(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh32x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Lsh8x8  <t> x y)
+       // match: (Lsh32x8  <t> x y)
        // cond:
        // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
        for {
@@ -3481,560 +3744,297 @@ func rewriteValueARM64_OpLsh8x8(v *Value, config *Config) bool {
                return true
        }
 }
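
Every Lsh function in this run encodes Go's shift semantics: a count at or above the operand width must produce 0, which a bare SLL does not guarantee. Constant counts are resolved at rewrite time, as the Lsh32x64 rules above show (SLLconst when uint64(c) < 32, MOVDconst [0] otherwise); variable counts go through CSELULT, a conditional select that yields its first argument when the CMPconst [64] flags report the widened count unsigned-less-than 64, and its second argument (Const64 [0]) otherwise. The net behavior, sketched in Go with an illustrative helper name:

    func lsh32x16(x int32, y uint16) int32 {
            // CSELULT (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))
            if uint64(y) >= 64 { // counts in [32,64) need no select: SLL shifts every bit out anyway
                    return 0
            }
            return x << y
    }
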
-func rewriteValueARM64_OpARM64MOVBUload(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh64x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (Lsh64x16 <t> x y)
        // cond:
-       // result: (MOVBUload [off1+off2] {sym} ptr mem)
+       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVBUload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARM64MOVBUload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
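
Each of the MOV*load/MOV*store functions removed here (they reappear elsewhere in the file under the new sort order) carries the same two address-folding rewrites, shown for the byte load in rule form as the diff's match/cond/result comments give them:

    (MOVBUload [off1] {sym}  (ADDconst [off2] ptr) mem) -> (MOVBUload [off1+off2] {sym} ptr mem)
    (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
            -> (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

Both fold an address computation into the instruction's immediate, so the final load or store can use reg+offset addressing directly.
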
-func rewriteValueARM64_OpARM64MOVBload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
-       // cond:
-       // result: (MOVBload [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVBload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARM64MOVBload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
                return true
        }
-       return false
 }
-func rewriteValueARM64_OpARM64MOVBstore(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh64x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (Lsh64x32 <t> x y)
        // cond:
-       // result: (MOVBstore [off1+off2] {sym} ptr val mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64MOVBstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARM64MOVBstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
                return true
        }
-       return false
 }
-func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh64x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-       // cond:
-       // result: (MOVDload [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVDload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (Lsh64x64  x (MOVDconst [c]))
+       // cond: uint64(c) < 64
+       // result: (SLLconst x [c])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               v.reset(OpARM64MOVDload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-       // cond:
-       // result: (MOVDstore [off1+off2] {sym} ptr val mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               c := v_1.AuxInt
+               if !(uint64(c) < 64) {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64MOVDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpARM64SLLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // match: (Lsh64x64  _ (MOVDconst [c]))
+       // cond: uint64(c) >= 64
+       // result: (MOVDconst [0])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               if !(uint64(c) >= 64) {
                        break
                }
-               v.reset(OpARM64MOVDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-       return false
-}
-func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (Lsh64x64 <t> x y)
        // cond:
-       // result: (MOVHUload [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVHUload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARM64MOVHUload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpConst64, t)
+               v1.AuxInt = 0
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v.AddArg(v2)
                return true
        }
-       return false
 }
-func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh64x8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (Lsh64x8  <t> x y)
        // cond:
-       // result: (MOVHload [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVHload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARM64MOVHload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
                return true
        }
-       return false
 }
-func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh8x16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (Lsh8x16 <t> x y)
        // cond:
-       // result: (MOVHstore [off1+off2] {sym} ptr val mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64MOVHstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+       // result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARM64MOVHstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
                return true
        }
-       return false
 }
-func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh8x32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
+       // match: (Lsh8x32 <t> x y)
        // cond:
-       // result: (MOVWUload [off1+off2] {sym} ptr mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVWUload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARM64MOVWUload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
                return true
        }
-       return false
 }
-func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
+func rewriteValueARM64_OpLsh8x64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
-       // cond:
-       // result: (MOVWload [off1+off2] {sym} ptr mem)
+       // match: (Lsh8x64   x (MOVDconst [c]))
+       // cond: uint64(c) < 8
+       // result: (SLLconst x [c])
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
+               x := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               v.reset(OpARM64MOVWload)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               c := v_1.AuxInt
+               if !(uint64(c) < 8) {
+                       break
+               }
+               v.reset(OpARM64SLLconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-       // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+       // match: (Lsh8x64   _ (MOVDconst [c]))
+       // cond: uint64(c) >= 8
+       // result: (MOVDconst [0])
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
+               v_1 := v.Args[1]
+               if v_1.Op != OpARM64MOVDconst {
                        break
                }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               mem := v.Args[1]
-               if !(canMergeSym(sym1, sym2)) {
+               c := v_1.AuxInt
+               if !(uint64(c) >= 8) {
                        break
                }
-               v.reset(OpARM64MOVWload)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(mem)
+               v.reset(OpARM64MOVDconst)
+               v.AuxInt = 0
                return true
        }
-       return false
-}
-func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+       // match: (Lsh8x64 <t> x y)
        // cond:
-       // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+       // result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               v.reset(OpARM64MOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpConst64, t)
+               v1.AuxInt = 0
+               v.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v2.AuxInt = 64
+               v2.AddArg(y)
+               v.AddArg(v2)
                return true
        }
-       // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-       // cond: canMergeSym(sym1,sym2)
-       // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+}
+func rewriteValueARM64_OpLsh8x8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Lsh8x8  <t> x y)
+       // cond:
+       // result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
        for {
-               off1 := v.AuxInt
-               sym1 := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpARM64MOVDaddr {
-                       break
-               }
-               off2 := v_0.AuxInt
-               sym2 := v_0.Aux
-               ptr := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(canMergeSym(sym1, sym2)) {
-                       break
-               }
-               v.reset(OpARM64MOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = mergeSym(sym1, sym2)
-               v.AddArg(ptr)
-               v.AddArg(val)
-               v.AddArg(mem)
+               t := v.Type
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpARM64CSELULT)
+               v0 := b.NewValue0(v.Line, OpARM64SLL, t)
+               v0.AddArg(x)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v0.AddArg(v1)
+               v.AddArg(v0)
+               v2 := b.NewValue0(v.Line, OpConst64, t)
+               v2.AuxInt = 0
+               v.AddArg(v2)
+               v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
+               v3.AuxInt = 64
+               v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v4.AddArg(y)
+               v3.AddArg(v4)
+               v.AddArg(v3)
                return true
        }
-       return false
 }
 func rewriteValueARM64_OpMod16(v *Value, config *Config) bool {
        b := v.Block
@@ -4647,8 +4647,8 @@ func rewriteValueARM64_OpMove(v *Value, config *Config) bool {
                v.AddArg(dst)
                v.AddArg(src)
                v0 := b.NewValue0(v.Line, OpARM64ADDconst, src.Type)
-               v0.AddArg(src)
                v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+               v0.AddArg(src)
                v.AddArg(v0)
                v.AddArg(mem)
                return true
@@ -5179,10 +5179,10 @@ func rewriteValueARM64_OpRsh16Ux64(v *Value, config *Config) bool {
                        break
                }
                v.reset(OpARM64SRLconst)
+               v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
                v0.AddArg(x)
                v.AddArg(v0)
-               v.AuxInt = c
                return true
        }
        // match: (Rsh16Ux64 _ (MOVDconst [c]))
@@ -5333,10 +5333,10 @@ func rewriteValueARM64_OpRsh16x64(v *Value, config *Config) bool {
                        break
                }
                v.reset(OpARM64SRAconst)
+               v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
                v0.AddArg(x)
                v.AddArg(v0)
-               v.AuxInt = c
                return true
        }
        // match: (Rsh16x64 x (MOVDconst [c]))
@@ -5353,10 +5353,10 @@ func rewriteValueARM64_OpRsh16x64(v *Value, config *Config) bool {
                        break
                }
                v.reset(OpARM64SRAconst)
+               v.AuxInt = 63
                v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
                v0.AddArg(x)
                v.AddArg(v0)
-               v.AuxInt = 63
                return true
        }
        // match: (Rsh16x64 x y)
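
These Rsh hunks also preserve the signed/unsigned split at oversized constant counts: the unsigned forms (Rsh16Ux64 and friends) rewrite to MOVDconst [0], while the signed forms rewrite to SRAconst [63] of the sign-extended operand, since an arithmetic shift by 63 leaves every bit a copy of the sign bit. In plain Go, with illustrative names and the count assumed at or above the operand width:

    func rsh16x64Big(x int16) int16    { return int16(int64(x) >> 63) } // SRAconst [63] (SignExt16to64 x): 0 or -1
    func rsh16Ux64Big(x uint16) uint16 { return 0 }                     // MOVDconst [0]
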
@@ -5491,10 +5491,10 @@ func rewriteValueARM64_OpRsh32Ux64(v *Value, config *Config) bool {
                        break
                }
                v.reset(OpARM64SRLconst)
+               v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
                v0.AddArg(x)
                v.AddArg(v0)
-               v.AuxInt = c
                return true
        }
        // match: (Rsh32Ux64 _ (MOVDconst [c]))
@@ -5645,10 +5645,10 @@ func rewriteValueARM64_OpRsh32x64(v *Value, config *Config) bool {
                        break
                }
                v.reset(OpARM64SRAconst)
+               v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
                v0.AddArg(x)
                v.AddArg(v0)
-               v.AuxInt = c
                return true
        }
        // match: (Rsh32x64 x (MOVDconst [c]))
@@ -5665,10 +5665,10 @@ func rewriteValueARM64_OpRsh32x64(v *Value, config *Config) bool {
                        break
                }
                v.reset(OpARM64SRAconst)
+               v.AuxInt = 63
                v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
                v0.AddArg(x)
                v.AddArg(v0)
-               v.AuxInt = 63
                return true
        }
        // match: (Rsh32x64 x y)
@@ -5799,8 +5799,8 @@ func rewriteValueARM64_OpRsh64Ux64(v *Value, config *Config) bool {
                        break
                }
                v.reset(OpARM64SRLconst)
-               v.AddArg(x)
                v.AuxInt = c
+               v.AddArg(x)
                return true
        }
        // match: (Rsh64Ux64 _ (MOVDconst [c]))
@@ -5943,8 +5943,8 @@ func rewriteValueARM64_OpRsh64x64(v *Value, config *Config) bool {
                        break
                }
                v.reset(OpARM64SRAconst)
-               v.AddArg(x)
                v.AuxInt = c
+               v.AddArg(x)
                return true
        }
        // match: (Rsh64x64 x (MOVDconst [c]))
@@ -5961,8 +5961,8 @@ func rewriteValueARM64_OpRsh64x64(v *Value, config *Config) bool {
                        break
                }
                v.reset(OpARM64SRAconst)
-               v.AddArg(x)
                v.AuxInt = 63
+               v.AddArg(x)
                return true
        }
        // match: (Rsh64x64 x y)
@@ -6093,10 +6093,10 @@ func rewriteValueARM64_OpRsh8Ux64(v *Value, config *Config) bool {
                        break
                }
                v.reset(OpARM64SRLconst)
+               v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
                v0.AddArg(x)
                v.AddArg(v0)
-               v.AuxInt = c
                return true
        }
        // match: (Rsh8Ux64  _ (MOVDconst [c]))
@@ -6247,10 +6247,10 @@ func rewriteValueARM64_OpRsh8x64(v *Value, config *Config) bool {
                        break
                }
                v.reset(OpARM64SRAconst)
+               v.AuxInt = c
                v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
                v0.AddArg(x)
                v.AddArg(v0)
-               v.AuxInt = c
                return true
        }
        // match: (Rsh8x64  x (MOVDconst [c]))
@@ -6267,10 +6267,10 @@ func rewriteValueARM64_OpRsh8x64(v *Value, config *Config) bool {
                        break
                }
                v.reset(OpARM64SRAconst)
+               v.AuxInt = 63
                v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
                v0.AddArg(x)
                v.AddArg(v0)
-               v.AuxInt = 63
                return true
        }
        // match: (Rsh8x64 x y)
index 96b5759531bb89da7bc0e5cbecdce7bae664846c..d30454239f94b42fe33706e36cff282b22e6d7d8 100644 (file)
@@ -8,8 +8,6 @@ import "math"
 var _ = math.MinInt8 // in case not otherwise used
 func rewriteValuePPC64(v *Value, config *Config) bool {
        switch v.Op {
-       case OpPPC64ADD:
-               return rewriteValuePPC64_OpPPC64ADD(v, config)
        case OpAdd16:
                return rewriteValuePPC64_OpAdd16(v, config)
        case OpAdd32:
@@ -154,22 +152,6 @@ func rewriteValuePPC64(v *Value, config *Config) bool {
                return rewriteValuePPC64_OpLess8U(v, config)
        case OpLoad:
                return rewriteValuePPC64_OpLoad(v, config)
-       case OpPPC64MOVBstore:
-               return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
-       case OpPPC64MOVBstorezero:
-               return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
-       case OpPPC64MOVDstore:
-               return rewriteValuePPC64_OpPPC64MOVDstore(v, config)
-       case OpPPC64MOVDstorezero:
-               return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
-       case OpPPC64MOVHstore:
-               return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
-       case OpPPC64MOVHstorezero:
-               return rewriteValuePPC64_OpPPC64MOVHstorezero(v, config)
-       case OpPPC64MOVWstore:
-               return rewriteValuePPC64_OpPPC64MOVWstore(v, config)
-       case OpPPC64MOVWstorezero:
-               return rewriteValuePPC64_OpPPC64MOVWstorezero(v, config)
        case OpMove:
                return rewriteValuePPC64_OpMove(v, config)
        case OpMul16:
@@ -216,6 +198,24 @@ func rewriteValuePPC64(v *Value, config *Config) bool {
                return rewriteValuePPC64_OpOr64(v, config)
        case OpOr8:
                return rewriteValuePPC64_OpOr8(v, config)
+       case OpPPC64ADD:
+               return rewriteValuePPC64_OpPPC64ADD(v, config)
+       case OpPPC64MOVBstore:
+               return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
+       case OpPPC64MOVBstorezero:
+               return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
+       case OpPPC64MOVDstore:
+               return rewriteValuePPC64_OpPPC64MOVDstore(v, config)
+       case OpPPC64MOVDstorezero:
+               return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
+       case OpPPC64MOVHstore:
+               return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
+       case OpPPC64MOVHstorezero:
+               return rewriteValuePPC64_OpPPC64MOVHstorezero(v, config)
+       case OpPPC64MOVWstore:
+               return rewriteValuePPC64_OpPPC64MOVWstore(v, config)
+       case OpPPC64MOVWstorezero:
+               return rewriteValuePPC64_OpPPC64MOVWstorezero(v, config)
        case OpSignExt16to32:
                return rewriteValuePPC64_OpSignExt16to32(v, config)
        case OpSignExt16to64:
@@ -283,41 +283,6 @@ func rewriteValuePPC64(v *Value, config *Config) bool {
        }
        return false
 }
-func rewriteValuePPC64_OpPPC64ADD(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (ADD (MOVDconst [c]) x)
-       // cond:
-       // result: (ADDconst [c] x)
-       for {
-               v_0 := v.Args[0]
-               if v_0.Op != OpPPC64MOVDconst {
-                       break
-               }
-               c := v_0.AuxInt
-               x := v.Args[1]
-               v.reset(OpPPC64ADDconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               return true
-       }
-       // match: (ADD x (MOVDconst [c]))
-       // cond:
-       // result: (ADDconst [c] x)
-       for {
-               x := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpPPC64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               v.reset(OpPPC64ADDconst)
-               v.AuxInt = c
-               v.AddArg(x)
-               return true
-       }
-       return false
-}
 func rewriteValuePPC64_OpAdd16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
@@ -1691,439 +1656,115 @@ func rewriteValuePPC64_OpLoad(v *Value, config *Config) bool {
        }
        return false
 }
-func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
-       // cond: is16Bit(off1+off2)
-       // result: (MOVBstore [off1+off2] {sym} x val mem)
+       // match: (Move [s] _ _ mem)
+       // cond: SizeAndAlign(s).Size() == 0
+       // result: mem
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpPPC64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               x := v_0.Args[0]
-               val := v.Args[1]
+               s := v.AuxInt
                mem := v.Args[2]
-               if !(is16Bit(off1 + off2)) {
+               if !(SizeAndAlign(s).Size() == 0) {
                        break
                }
-               v.reset(OpPPC64MOVBstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(x)
-               v.AddArg(val)
+               v.reset(OpCopy)
+               v.Type = mem.Type
                v.AddArg(mem)
                return true
        }
-       // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
-       // cond: c == 0
-       // result: (MOVBstorezero [off] {sym} ptr mem)
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 1
+       // result: (MOVBstore dst (MOVBZload src mem) mem)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpPPC64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
                mem := v.Args[2]
-               if !(c == 0) {
+               if !(SizeAndAlign(s).Size() == 1) {
                        break
                }
-               v.reset(OpPPC64MOVBstorezero)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               v.reset(OpPPC64MOVBstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
-       // cond: is16Bit(off1+off2)
-       // result: (MOVBstorezero [off1+off2] {sym} x mem)
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore dst (MOVHZload src mem) mem)
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpPPC64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               x := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is16Bit(off1 + off2)) {
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
+               mem := v.Args[2]
+               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
                        break
                }
-               v.reset(OpPPC64MOVBstorezero)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(x)
+               v.reset(OpPPC64MOVHstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValuePPC64_OpPPC64MOVDstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
-       // cond: is16Bit(off1+off2)
-       // result: (MOVDstore [off1+off2] {sym} x val mem)
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 2
+       // result: (MOVBstore [1] dst (MOVBZload [1] src mem)           (MOVBstore dst (MOVBZload src mem) mem))
        for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpPPC64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               x := v_0.Args[0]
-               val := v.Args[1]
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
                mem := v.Args[2]
-               if !(is16Bit(off1 + off2)) {
+               if !(SizeAndAlign(s).Size() == 2) {
                        break
                }
-               v.reset(OpPPC64MOVDstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(x)
-               v.AddArg(val)
-               v.AddArg(mem)
+               v.reset(OpPPC64MOVBstore)
+               v.AuxInt = 1
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+               v0.AuxInt = 1
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
+               v1.AddArg(dst)
+               v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
+               v2.AddArg(src)
+               v2.AddArg(mem)
+               v1.AddArg(v2)
+               v1.AddArg(mem)
+               v.AddArg(v1)
                return true
        }
-       // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
-       // cond: c == 0
-       // result: (MOVDstorezero [off] {sym} ptr mem)
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
+       // result: (MOVWstore dst (MOVWload src mem) mem)
        for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpPPC64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
+               s := v.AuxInt
+               dst := v.Args[0]
+               src := v.Args[1]
                mem := v.Args[2]
-               if !(c == 0) {
+               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
                        break
                }
-               v.reset(OpPPC64MOVDstorezero)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
+               v.reset(OpPPC64MOVWstore)
+               v.AddArg(dst)
+               v0 := b.NewValue0(v.Line, OpPPC64MOVWload, config.fe.TypeInt32())
+               v0.AddArg(src)
+               v0.AddArg(mem)
+               v.AddArg(v0)
                v.AddArg(mem)
                return true
        }
-       return false
-}
-func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
-       // cond: is16Bit(off1+off2)
-       // result: (MOVDstorezero [off1+off2] {sym} x mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpPPC64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               x := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is16Bit(off1 + off2)) {
-                       break
-               }
-               v.reset(OpPPC64MOVDstorezero)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(x)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
-       // cond: is16Bit(off1+off2)
-       // result: (MOVHstore [off1+off2] {sym} x val mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpPPC64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               x := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is16Bit(off1 + off2)) {
-                       break
-               }
-               v.reset(OpPPC64MOVHstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(x)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
-       // cond: c == 0
-       // result: (MOVHstorezero [off] {sym} ptr mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpPPC64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               mem := v.Args[2]
-               if !(c == 0) {
-                       break
-               }
-               v.reset(OpPPC64MOVHstorezero)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
-       // cond: is16Bit(off1+off2)
-       // result: (MOVHstorezero [off1+off2] {sym} x mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpPPC64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               x := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is16Bit(off1 + off2)) {
-                       break
-               }
-               v.reset(OpPPC64MOVHstorezero)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(x)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValuePPC64_OpPPC64MOVWstore(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
-       // cond: is16Bit(off1+off2)
-       // result: (MOVWstore [off1+off2] {sym} x val mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpPPC64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               x := v_0.Args[0]
-               val := v.Args[1]
-               mem := v.Args[2]
-               if !(is16Bit(off1 + off2)) {
-                       break
-               }
-               v.reset(OpPPC64MOVWstore)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(x)
-               v.AddArg(val)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
-       // cond: c == 0
-       // result: (MOVWstorezero [off] {sym} ptr mem)
-       for {
-               off := v.AuxInt
-               sym := v.Aux
-               ptr := v.Args[0]
-               v_1 := v.Args[1]
-               if v_1.Op != OpPPC64MOVDconst {
-                       break
-               }
-               c := v_1.AuxInt
-               mem := v.Args[2]
-               if !(c == 0) {
-                       break
-               }
-               v.reset(OpPPC64MOVWstorezero)
-               v.AuxInt = off
-               v.Aux = sym
-               v.AddArg(ptr)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
-       // cond: is16Bit(off1+off2)
-       // result: (MOVWstorezero [off1+off2] {sym} x mem)
-       for {
-               off1 := v.AuxInt
-               sym := v.Aux
-               v_0 := v.Args[0]
-               if v_0.Op != OpPPC64ADDconst {
-                       break
-               }
-               off2 := v_0.AuxInt
-               x := v_0.Args[0]
-               mem := v.Args[1]
-               if !(is16Bit(off1 + off2)) {
-                       break
-               }
-               v.reset(OpPPC64MOVWstorezero)
-               v.AuxInt = off1 + off2
-               v.Aux = sym
-               v.AddArg(x)
-               v.AddArg(mem)
-               return true
-       }
-       return false
-}
-func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Move [s] _ _ mem)
-       // cond: SizeAndAlign(s).Size() == 0
-       // result: mem
-       for {
-               s := v.AuxInt
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 0) {
-                       break
-               }
-               v.reset(OpCopy)
-               v.Type = mem.Type
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 1
-       // result: (MOVBstore dst (MOVBZload src mem) mem)
-       for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 1) {
-                       break
-               }
-               v.reset(OpPPC64MOVBstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore dst (MOVHZload src mem) mem)
-       for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
-                       break
-               }
-               v.reset(OpPPC64MOVHstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 2
-       // result: (MOVBstore [1] dst (MOVBZload [1] src mem)           (MOVBstore dst (MOVBZload src mem) mem))
-       for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 2) {
-                       break
-               }
-               v.reset(OpPPC64MOVBstore)
-               v.AuxInt = 1
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-               v0.AuxInt = 1
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
-               v1.AddArg(dst)
-               v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-               v2.AddArg(src)
-               v2.AddArg(mem)
-               v1.AddArg(v2)
-               v1.AddArg(mem)
-               v.AddArg(v1)
-               return true
-       }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-       // result: (MOVWstore dst (MOVWload src mem) mem)
-       for {
-               s := v.AuxInt
-               dst := v.Args[0]
-               src := v.Args[1]
-               mem := v.Args[2]
-               if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
-                       break
-               }
-               v.reset(OpPPC64MOVWstore)
-               v.AddArg(dst)
-               v0 := b.NewValue0(v.Line, OpPPC64MOVWload, config.fe.TypeInt32())
-               v0.AddArg(src)
-               v0.AddArg(mem)
-               v.AddArg(v0)
-               v.AddArg(mem)
-               return true
-       }
-       // match: (Move [s] dst src mem)
-       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-       // result: (MOVHstore [2] dst (MOVHZload [2] src mem)           (MOVHstore dst (MOVHZload src mem) mem))
+       // match: (Move [s] dst src mem)
+       // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
+       // result: (MOVHstore [2] dst (MOVHZload [2] src mem)           (MOVHstore dst (MOVHZload src mem) mem))
        for {
                s := v.AuxInt
                dst := v.Args[0]
@@ -2347,8 +1988,8 @@ func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
                v.AddArg(dst)
                v.AddArg(src)
                v0 := b.NewValue0(v.Line, OpPPC64ADDconst, src.Type)
-               v0.AddArg(src)
                v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+               v0.AddArg(src)
                v.AddArg(v0)
                v.AddArg(mem)
                return true
@@ -2424,306 +2065,665 @@ func rewriteValuePPC64_OpMul64F(v *Value, config *Config) bool {
        _ = b
        // match: (Mul64F x y)
        // cond:
-       // result: (FMUL x y)
+       // result: (FMUL x y)
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpPPC64FMUL)
+               v.AddArg(x)
+               v.AddArg(y)
+               return true
+       }
+}
+func rewriteValuePPC64_OpMul8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Mul8   x y)
+       // cond:
+       // result: (MULLW (SignExt8to32  x) (SignExt8to32 y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpPPC64MULLW)
+               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
+               v1.AddArg(y)
+               v.AddArg(v1)
+               return true
+       }
+}
+func rewriteValuePPC64_OpNeg16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg16  x)
+       // cond:
+       // result: (NEG (ZeroExt16to64 x))
+       for {
+               x := v.Args[0]
+               v.reset(OpPPC64NEG)
+               v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValuePPC64_OpNeg32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg32  x)
+       // cond:
+       // result: (NEG (ZeroExt32to64 x))
+       for {
+               x := v.Args[0]
+               v.reset(OpPPC64NEG)
+               v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValuePPC64_OpNeg64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg64  x)
+       // cond:
+       // result: (NEG x)
+       for {
+               x := v.Args[0]
+               v.reset(OpPPC64NEG)
+               v.AddArg(x)
+               return true
+       }
+}
+func rewriteValuePPC64_OpNeg8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neg8   x)
+       // cond:
+       // result: (NEG (ZeroExt8to64 x))
+       for {
+               x := v.Args[0]
+               v.reset(OpPPC64NEG)
+               v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValuePPC64_OpNeq16(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq16 x y)
+       // cond:
+       // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpPPC64NotEqual)
+               v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValuePPC64_OpNeq32(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq32 x y)
+       // cond:
+       // result: (NotEqual (CMPW x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpPPC64NotEqual)
+               v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValuePPC64_OpNeq64(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq64 x y)
+       // cond:
+       // result: (NotEqual (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpPPC64NotEqual)
+               v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValuePPC64_OpNeq64F(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq64F x y)
+       // cond:
+       // result: (NotEqual (FCMPU x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpPPC64NotEqual)
+               v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValuePPC64_OpNeq8(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (Neq8 x y)
+       // cond:
+       // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpPPC64NotEqual)
+               v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v1.AddArg(x)
+               v0.AddArg(v1)
+               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
+               v2.AddArg(y)
+               v0.AddArg(v2)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValuePPC64_OpNeqPtr(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NeqPtr x y)
+       // cond:
+       // result: (NotEqual (CMP x y))
+       for {
+               x := v.Args[0]
+               y := v.Args[1]
+               v.reset(OpPPC64NotEqual)
+               v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
+               v0.AddArg(x)
+               v0.AddArg(y)
+               v.AddArg(v0)
+               return true
+       }
+}
+func rewriteValuePPC64_OpNilCheck(v *Value, config *Config) bool {
+       b := v.Block
+       _ = b
+       // match: (NilCheck ptr mem)
+       // cond:
+       // result: (LoweredNilCheck ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpPPC64FMUL)
-               v.AddArg(x)
-               v.AddArg(y)
+               ptr := v.Args[0]
+               mem := v.Args[1]
+               v.reset(OpPPC64LoweredNilCheck)
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
 }
-func rewriteValuePPC64_OpMul8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpOffPtr(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Mul8   x y)
+       // match: (OffPtr [off] ptr)
        // cond:
-       // result: (MULLW (SignExt8to32  x) (SignExt8to32 y))
+       // result: (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpPPC64MULLW)
-               v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v0.AddArg(x)
+               off := v.AuxInt
+               ptr := v.Args[0]
+               v.reset(OpPPC64ADD)
+               v0 := b.NewValue0(v.Line, OpPPC64MOVDconst, config.Frontend().TypeInt64())
+               v0.AuxInt = off
                v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               v.AddArg(ptr)
                return true
        }
 }
-func rewriteValuePPC64_OpNeg16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpOr16(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg16  x)
+       // match: (Or16 x y)
        // cond:
-       // result: (NEG (ZeroExt16to64 x))
+       // result: (OR (ZeroExt16to64 x) (ZeroExt16to64 y))
        for {
                x := v.Args[0]
-               v.reset(OpPPC64NEG)
+               y := v.Args[1]
+               v.reset(OpPPC64OR)
                v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
                v0.AddArg(x)
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValuePPC64_OpNeg32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpOr32(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg32  x)
+       // match: (Or32 x y)
        // cond:
-       // result: (NEG (ZeroExt32to64 x))
+       // result: (OR (ZeroExt32to64 x) (ZeroExt32to64 y))
        for {
                x := v.Args[0]
-               v.reset(OpPPC64NEG)
+               y := v.Args[1]
+               v.reset(OpPPC64OR)
                v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
                v0.AddArg(x)
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValuePPC64_OpNeg64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpOr64(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg64  x)
+       // match: (Or64 x y)
        // cond:
-       // result: (NEG x)
+       // result: (OR x y)
        for {
                x := v.Args[0]
-               v.reset(OpPPC64NEG)
+               y := v.Args[1]
+               v.reset(OpPPC64OR)
                v.AddArg(x)
+               v.AddArg(y)
                return true
        }
 }
-func rewriteValuePPC64_OpNeg8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpOr8(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neg8   x)
+       // match: (Or8  x y)
        // cond:
-       // result: (NEG (ZeroExt8to64 x))
+       // result: (OR (ZeroExt8to64 x) (ZeroExt8to64 y))
        for {
                x := v.Args[0]
-               v.reset(OpPPC64NEG)
+               y := v.Args[1]
+               v.reset(OpPPC64OR)
                v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
                v0.AddArg(x)
                v.AddArg(v0)
+               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
+               v1.AddArg(y)
+               v.AddArg(v1)
                return true
        }
 }
-func rewriteValuePPC64_OpNeq16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64ADD(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq16 x y)
+       // match: (ADD (MOVDconst [c]) x)
        // cond:
-       // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+       // result: (ADDconst [c] x)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpPPC64NotEqual)
-               v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               v_0 := v.Args[0]
+               if v_0.Op != OpPPC64MOVDconst {
+                       break
+               }
+               c := v_0.AuxInt
+               x := v.Args[1]
+               v.reset(OpPPC64ADDconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
-}
-func rewriteValuePPC64_OpNeq32(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Neq32 x y)
+       // match: (ADD x (MOVDconst [c]))
        // cond:
-       // result: (NotEqual (CMPW x y))
+       // result: (ADDconst [c] x)
        for {
                x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpPPC64NotEqual)
-               v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpPPC64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               v.reset(OpPPC64ADDconst)
+               v.AuxInt = c
+               v.AddArg(x)
                return true
        }
+       return false
 }
-func rewriteValuePPC64_OpNeq64(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq64 x y)
-       // cond:
-       // result: (NotEqual (CMP x y))
+       // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
+       // cond: is16Bit(off1+off2)
+       // result: (MOVBstore [off1+off2] {sym} x val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpPPC64NotEqual)
-               v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpPPC64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               x := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is16Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpPPC64MOVBstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(x)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
+       // cond: c == 0
+       // result: (MOVBstorezero [off] {sym} ptr mem)
+       for {
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpPPC64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               if !(c == 0) {
+                       break
+               }
+               v.reset(OpPPC64MOVBstorezero)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValuePPC64_OpNeq64F(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq64F x y)
-       // cond:
-       // result: (NotEqual (FCMPU x y))
+       // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
+       // cond: is16Bit(off1+off2)
+       // result: (MOVBstorezero [off1+off2] {sym} x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpPPC64NotEqual)
-               v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpPPC64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               x := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is16Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpPPC64MOVBstorezero)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValuePPC64_OpNeq8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64MOVDstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Neq8 x y)
-       // cond:
-       // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+       // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
+       // cond: is16Bit(off1+off2)
+       // result: (MOVDstore [off1+off2] {sym} x val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpPPC64NotEqual)
-               v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v1.AddArg(x)
-               v0.AddArg(v1)
-               v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-               v2.AddArg(y)
-               v0.AddArg(v2)
-               v.AddArg(v0)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpPPC64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               x := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is16Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpPPC64MOVDstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(x)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValuePPC64_OpNeqPtr(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (NeqPtr x y)
-       // cond:
-       // result: (NotEqual (CMP x y))
+       // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
+       // cond: c == 0
+       // result: (MOVDstorezero [off] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpPPC64NotEqual)
-               v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
-               v0.AddArg(x)
-               v0.AddArg(y)
-               v.AddArg(v0)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpPPC64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               if !(c == 0) {
+                       break
+               }
+               v.reset(OpPPC64MOVDstorezero)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValuePPC64_OpNilCheck(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (NilCheck ptr mem)
-       // cond:
-       // result: (LoweredNilCheck ptr mem)
+       // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
+       // cond: is16Bit(off1+off2)
+       // result: (MOVDstorezero [off1+off2] {sym} x mem)
        for {
-               ptr := v.Args[0]
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpPPC64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               x := v_0.Args[0]
                mem := v.Args[1]
-               v.reset(OpPPC64LoweredNilCheck)
-               v.AddArg(ptr)
+               if !(is16Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpPPC64MOVDstorezero)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(x)
                v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValuePPC64_OpOffPtr(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (OffPtr [off] ptr)
-       // cond:
-       // result: (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr)
+       // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
+       // cond: is16Bit(off1+off2)
+       // result: (MOVHstore [off1+off2] {sym} x val mem)
+       for {
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpPPC64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               x := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is16Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpPPC64MOVHstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(x)
+               v.AddArg(val)
+               v.AddArg(mem)
+               return true
+       }
+       // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
+       // cond: c == 0
+       // result: (MOVHstorezero [off] {sym} ptr mem)
        for {
                off := v.AuxInt
+               sym := v.Aux
                ptr := v.Args[0]
-               v.reset(OpPPC64ADD)
-               v0 := b.NewValue0(v.Line, OpPPC64MOVDconst, config.Frontend().TypeInt64())
-               v0.AuxInt = off
-               v.AddArg(v0)
+               v_1 := v.Args[1]
+               if v_1.Op != OpPPC64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               if !(c == 0) {
+                       break
+               }
+               v.reset(OpPPC64MOVHstorezero)
+               v.AuxInt = off
+               v.Aux = sym
                v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValuePPC64_OpOr16(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or16 x y)
-       // cond:
-       // result: (OR (ZeroExt16to64 x) (ZeroExt16to64 y))
+       // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
+       // cond: is16Bit(off1+off2)
+       // result: (MOVHstorezero [off1+off2] {sym} x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpPPC64OR)
-               v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpPPC64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               x := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is16Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpPPC64MOVHstorezero)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValuePPC64_OpOr32(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64MOVWstore(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or32 x y)
-       // cond:
-       // result: (OR (ZeroExt32to64 x) (ZeroExt32to64 y))
+       // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
+       // cond: is16Bit(off1+off2)
+       // result: (MOVWstore [off1+off2] {sym} x val mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpPPC64OR)
-               v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpPPC64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               x := v_0.Args[0]
+               val := v.Args[1]
+               mem := v.Args[2]
+               if !(is16Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpPPC64MOVWstore)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(x)
+               v.AddArg(val)
+               v.AddArg(mem)
                return true
        }
-}
-func rewriteValuePPC64_OpOr64(v *Value, config *Config) bool {
-       b := v.Block
-       _ = b
-       // match: (Or64 x y)
-       // cond:
-       // result: (OR x y)
+       // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
+       // cond: c == 0
+       // result: (MOVWstorezero [off] {sym} ptr mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpPPC64OR)
-               v.AddArg(x)
-               v.AddArg(y)
+               off := v.AuxInt
+               sym := v.Aux
+               ptr := v.Args[0]
+               v_1 := v.Args[1]
+               if v_1.Op != OpPPC64MOVDconst {
+                       break
+               }
+               c := v_1.AuxInt
+               mem := v.Args[2]
+               if !(c == 0) {
+                       break
+               }
+               v.reset(OpPPC64MOVWstorezero)
+               v.AuxInt = off
+               v.Aux = sym
+               v.AddArg(ptr)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
-func rewriteValuePPC64_OpOr8(v *Value, config *Config) bool {
+func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value, config *Config) bool {
        b := v.Block
        _ = b
-       // match: (Or8  x y)
-       // cond:
-       // result: (OR (ZeroExt8to64 x) (ZeroExt8to64 y))
+       // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
+       // cond: is16Bit(off1+off2)
+       // result: (MOVWstorezero [off1+off2] {sym} x mem)
        for {
-               x := v.Args[0]
-               y := v.Args[1]
-               v.reset(OpPPC64OR)
-               v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v0.AddArg(x)
-               v.AddArg(v0)
-               v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-               v1.AddArg(y)
-               v.AddArg(v1)
+               off1 := v.AuxInt
+               sym := v.Aux
+               v_0 := v.Args[0]
+               if v_0.Op != OpPPC64ADDconst {
+                       break
+               }
+               off2 := v_0.AuxInt
+               x := v_0.Args[0]
+               mem := v.Args[1]
+               if !(is16Bit(off1 + off2)) {
+                       break
+               }
+               v.reset(OpPPC64MOVWstorezero)
+               v.AuxInt = off1 + off2
+               v.Aux = sym
+               v.AddArg(x)
+               v.AddArg(mem)
                return true
        }
+       return false
 }
 func rewriteValuePPC64_OpSignExt16to32(v *Value, config *Config) bool {
        b := v.Block
@@ -3456,8 +3456,8 @@ func rewriteValuePPC64_OpZero(v *Value, config *Config) bool {
                v.AuxInt = SizeAndAlign(s).Align()
                v.AddArg(ptr)
                v0 := b.NewValue0(v.Line, OpPPC64ADDconst, ptr.Type)
-               v0.AddArg(ptr)
                v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
+               v0.AddArg(ptr)
                v.AddArg(v0)
                v.AddArg(mem)
                return true
index d2fbfb9f10c247de9778c8ec25346547f45a891b..33d90f53414004ae77de999cc70b97298a7be0c6 100644 (file)
--- a/src/cmd/compile/internal/ssa/rewritedec64.go
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go
@@ -198,19 +198,19 @@ func rewriteValuedec64_OpArg(v *Value, config *Config) bool {
        // cond: is64BitInt(v.Type) && v.Type.IsSigned()
        // result: (Int64Make     (Arg <config.fe.TypeInt32()> {n} [off+4])     (Arg <config.fe.TypeUInt32()> {n} [off]))
        for {
-               n := v.Aux
                off := v.AuxInt
+               n := v.Aux
                if !(is64BitInt(v.Type) && v.Type.IsSigned()) {
                        break
                }
                v.reset(OpInt64Make)
                v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt32())
-               v0.Aux = n
                v0.AuxInt = off + 4
+               v0.Aux = n
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
-               v1.Aux = n
                v1.AuxInt = off
+               v1.Aux = n
                v.AddArg(v1)
                return true
        }
@@ -218,19 +218,19 @@ func rewriteValuedec64_OpArg(v *Value, config *Config) bool {
        // cond: is64BitInt(v.Type) && !v.Type.IsSigned()
        // result: (Int64Make     (Arg <config.fe.TypeUInt32()> {n} [off+4])     (Arg <config.fe.TypeUInt32()> {n} [off]))
        for {
-               n := v.Aux
                off := v.AuxInt
+               n := v.Aux
                if !(is64BitInt(v.Type) && !v.Type.IsSigned()) {
                        break
                }
                v.reset(OpInt64Make)
                v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
-               v0.Aux = n
                v0.AuxInt = off + 4
+               v0.Aux = n
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
-               v1.Aux = n
                v1.AuxInt = off
+               v1.Aux = n
                v.AddArg(v1)
                return true
        }
@@ -738,13 +738,13 @@ func rewriteValuedec64_OpLrot64(v *Value, config *Config) bool {
        // cond: c <= 32
        // result: (Int64Make           (Or32 <config.fe.TypeUInt32()>                  (Lsh32x32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [c]))                   (Rsh32Ux32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [32-c])))              (Or32 <config.fe.TypeUInt32()>                  (Lsh32x32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [c]))                   (Rsh32Ux32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [32-c]))))
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpInt64Make {
                        break
                }
                hi := v_0.Args[0]
                lo := v_0.Args[1]
-               c := v.AuxInt
                if !(c <= 32) {
                        break
                }
@@ -783,22 +783,22 @@ func rewriteValuedec64_OpLrot64(v *Value, config *Config) bool {
        // cond: c > 32
        // result: (Lrot64 (Int64Make lo hi) [c-32])
        for {
+               c := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpInt64Make {
                        break
                }
                hi := v_0.Args[0]
                lo := v_0.Args[1]
-               c := v.AuxInt
                if !(c > 32) {
                        break
                }
                v.reset(OpLrot64)
+               v.AuxInt = c - 32
                v0 := b.NewValue0(v.Line, OpInt64Make, config.fe.TypeUInt64())
                v0.AddArg(lo)
                v0.AddArg(hi)
                v.AddArg(v0)
-               v.AuxInt = c - 32
                return true
        }
        return false
index 00bb24a67bcbd0f1624a6b82787b3153f2f4e414..f4f2b50f6203a983b22730730b6b079bb4286a75 100644 (file)
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -733,8 +733,8 @@ func rewriteValuegeneric_OpAddPtr(v *Value, config *Config) bool {
                c := v_1.AuxInt
                v.reset(OpOffPtr)
                v.Type = t
-               v.AddArg(x)
                v.AuxInt = c
+               v.AddArg(x)
                return true
        }
        return false
@@ -1370,19 +1370,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
        // cond: v.Type.IsString()
        // result: (StringMake     (Arg <config.fe.TypeBytePtr()> {n} [off])     (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]))
        for {
-               n := v.Aux
                off := v.AuxInt
+               n := v.Aux
                if !(v.Type.IsString()) {
                        break
                }
                v.reset(OpStringMake)
                v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
-               v0.Aux = n
                v0.AuxInt = off
+               v0.Aux = n
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
-               v1.Aux = n
                v1.AuxInt = off + config.PtrSize
+               v1.Aux = n
                v.AddArg(v1)
                return true
        }
@@ -1390,23 +1390,23 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
        // cond: v.Type.IsSlice()
        // result: (SliceMake     (Arg <v.Type.ElemType().PtrTo()> {n} [off])     (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize])     (Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize]))
        for {
-               n := v.Aux
                off := v.AuxInt
+               n := v.Aux
                if !(v.Type.IsSlice()) {
                        break
                }
                v.reset(OpSliceMake)
                v0 := b.NewValue0(v.Line, OpArg, v.Type.ElemType().PtrTo())
-               v0.Aux = n
                v0.AuxInt = off
+               v0.Aux = n
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
-               v1.Aux = n
                v1.AuxInt = off + config.PtrSize
+               v1.Aux = n
                v.AddArg(v1)
                v2 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
-               v2.Aux = n
                v2.AuxInt = off + 2*config.PtrSize
+               v2.Aux = n
                v.AddArg(v2)
                return true
        }
@@ -1414,19 +1414,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
        // cond: v.Type.IsInterface()
        // result: (IMake     (Arg <config.fe.TypeBytePtr()> {n} [off])     (Arg <config.fe.TypeBytePtr()> {n} [off+config.PtrSize]))
        for {
-               n := v.Aux
                off := v.AuxInt
+               n := v.Aux
                if !(v.Type.IsInterface()) {
                        break
                }
                v.reset(OpIMake)
                v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
-               v0.Aux = n
                v0.AuxInt = off
+               v0.Aux = n
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
-               v1.Aux = n
                v1.AuxInt = off + config.PtrSize
+               v1.Aux = n
                v.AddArg(v1)
                return true
        }
@@ -1434,19 +1434,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
        // cond: v.Type.IsComplex() && v.Type.Size() == 16
        // result: (ComplexMake     (Arg <config.fe.TypeFloat64()> {n} [off])     (Arg <config.fe.TypeFloat64()> {n} [off+8]))
        for {
-               n := v.Aux
                off := v.AuxInt
+               n := v.Aux
                if !(v.Type.IsComplex() && v.Type.Size() == 16) {
                        break
                }
                v.reset(OpComplexMake)
                v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64())
-               v0.Aux = n
                v0.AuxInt = off
+               v0.Aux = n
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64())
-               v1.Aux = n
                v1.AuxInt = off + 8
+               v1.Aux = n
                v.AddArg(v1)
                return true
        }
@@ -1454,19 +1454,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
        // cond: v.Type.IsComplex() && v.Type.Size() == 8
        // result: (ComplexMake     (Arg <config.fe.TypeFloat32()> {n} [off])     (Arg <config.fe.TypeFloat32()> {n} [off+4]))
        for {
-               n := v.Aux
                off := v.AuxInt
+               n := v.Aux
                if !(v.Type.IsComplex() && v.Type.Size() == 8) {
                        break
                }
                v.reset(OpComplexMake)
                v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32())
-               v0.Aux = n
                v0.AuxInt = off
+               v0.Aux = n
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32())
-               v1.Aux = n
                v1.AuxInt = off + 4
+               v1.Aux = n
                v.AddArg(v1)
                return true
        }
@@ -1486,15 +1486,15 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
        // result: (StructMake1     (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]))
        for {
                t := v.Type
-               n := v.Aux
                off := v.AuxInt
+               n := v.Aux
                if !(t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)) {
                        break
                }
                v.reset(OpStructMake1)
                v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
-               v0.Aux = n
                v0.AuxInt = off + t.FieldOff(0)
+               v0.Aux = n
                v.AddArg(v0)
                return true
        }
@@ -1503,19 +1503,19 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
        // result: (StructMake2     (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])     (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]))
        for {
                t := v.Type
-               n := v.Aux
                off := v.AuxInt
+               n := v.Aux
                if !(t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)) {
                        break
                }
                v.reset(OpStructMake2)
                v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
-               v0.Aux = n
                v0.AuxInt = off + t.FieldOff(0)
+               v0.Aux = n
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
-               v1.Aux = n
                v1.AuxInt = off + t.FieldOff(1)
+               v1.Aux = n
                v.AddArg(v1)
                return true
        }
@@ -1524,23 +1524,23 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
        // result: (StructMake3     (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])     (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)])     (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]))
        for {
                t := v.Type
-               n := v.Aux
                off := v.AuxInt
+               n := v.Aux
                if !(t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)) {
                        break
                }
                v.reset(OpStructMake3)
                v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
-               v0.Aux = n
                v0.AuxInt = off + t.FieldOff(0)
+               v0.Aux = n
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
-               v1.Aux = n
                v1.AuxInt = off + t.FieldOff(1)
+               v1.Aux = n
                v.AddArg(v1)
                v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2))
-               v2.Aux = n
                v2.AuxInt = off + t.FieldOff(2)
+               v2.Aux = n
                v.AddArg(v2)
                return true
        }
@@ -1549,27 +1549,27 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
        // result: (StructMake4     (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])     (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)])     (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)])     (Arg <t.FieldType(3)> {n} [off+t.FieldOff(3)]))
        for {
                t := v.Type
-               n := v.Aux
                off := v.AuxInt
+               n := v.Aux
                if !(t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)) {
                        break
                }
                v.reset(OpStructMake4)
                v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
-               v0.Aux = n
                v0.AuxInt = off + t.FieldOff(0)
+               v0.Aux = n
                v.AddArg(v0)
                v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
-               v1.Aux = n
                v1.AuxInt = off + t.FieldOff(1)
+               v1.Aux = n
                v.AddArg(v1)
                v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2))
-               v2.Aux = n
                v2.AuxInt = off + t.FieldOff(2)
+               v2.Aux = n
                v.AddArg(v2)
                v3 := b.NewValue0(v.Line, OpArg, t.FieldType(3))
-               v3.Aux = n
                v3.AuxInt = off + t.FieldOff(3)
+               v3.Aux = n
                v.AddArg(v3)
                return true
        }
@@ -6359,26 +6359,26 @@ func rewriteValuegeneric_OpOffPtr(v *Value, config *Config) bool {
        // cond:
        // result: (OffPtr p [a+b])
        for {
+               a := v.AuxInt
                v_0 := v.Args[0]
                if v_0.Op != OpOffPtr {
                        break
                }
-               p := v_0.Args[0]
                b := v_0.AuxInt
-               a := v.AuxInt
+               p := v_0.Args[0]
                v.reset(OpOffPtr)
-               v.AddArg(p)
                v.AuxInt = a + b
+               v.AddArg(p)
                return true
        }
        // match: (OffPtr p [0])
        // cond: v.Type.Compare(p.Type) == CMPeq
        // result: p
        for {
-               p := v.Args[0]
                if v.AuxInt != 0 {
                        break
                }
+               p := v.Args[0]
                if !(v.Type.Compare(p.Type) == CMPeq) {
                        break
                }