From fa63d243333a735e81906bafc3951a4697fb6dee Mon Sep 17 00:00:00 2001
From: Alberto Donizetti
Date: Tue, 22 Sep 2020 11:23:29 +0200
Subject: [PATCH] cmd/compile: switch to typed for const memory folding amd64 rules

Passes gotip build -toolexec 'toolstash -cmp' -a std

Change-Id: Ide811e4b4130a0bd2ac560375fd7634bc51be251
Reviewed-on: https://go-review.googlesource.com/c/go/+/256220
Trust: Alberto Donizetti
Reviewed-by: Keith Randall
---
 src/cmd/compile/internal/ssa/gen/AMD64.rules |  116 +-
 src/cmd/compile/internal/ssa/rewriteAMD64.go | 1892 +++++++++---------
 2 files changed, 1004 insertions(+), 1004 deletions(-)

diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index eae2d0b094..67c69674f7 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -1134,94 +1134,94 @@
 // We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
 // what variables are being read/written by the ops.
 (MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSymTyped(sym1,sym2)} base mem)
 (MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-(MOV(Q|L|W|B)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) ->
-	(MOV(Q|L|W|B)storeconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
+	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
+(MOV(Q|L|W|B)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) =>
+	(MOV(Q|L|W|B)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem)
 (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 ((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 ((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 (CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	(CMP(Q|L|W|B)load [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 (CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
-	(CMP(Q|L|W|B)constload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
+	(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 ((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 ((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
-	((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
+	((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
-	((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
+	((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 
 // fold LEAQs together
-(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
+(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	(LEAQ [off1+off2] {mergeSymTyped(sym1,sym2)} x)
 
 // LEAQ into LEAQ1
-(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
-	(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+	(LEAQ1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
 
 // LEAQ1 into LEAQ
-(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	(LEAQ1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
 
 // LEAQ into LEAQ[248]
-(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
-	(LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
-(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
-	(LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
-(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB ->
-	(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+	(LEAQ2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
+(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+	(LEAQ4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
+(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+	(LEAQ8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
 
 // LEAQ[248] into LEAQ
-(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
-(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
-(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	(LEAQ2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	(LEAQ4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	(LEAQ8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
 
 // LEAQ[1248] into LEAQ[1248]. Only some such merges are possible.
-(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
-(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
-	(LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
-(LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(off1+2*off2) && sym2 == nil ->
+(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	(LEAQ2 [off1+off2] {mergeSymTyped(sym1, sym2)} x y)
+(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+	(LEAQ2 [off1+off2] {mergeSymTyped(sym1, sym2)} y x)
+(LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil =>
 	(LEAQ4 [off1+2*off2] {sym1} x y)
-(LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(off1+4*off2) && sym2 == nil ->
+(LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil =>
 	(LEAQ8 [off1+4*off2] {sym1} x y)
 // TODO: more?
 
 // Lower LEAQ2/4/8 when the offset is a constant
-(LEAQ2 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(off+scale*2) ->
-	(LEAQ [off+scale*2] {sym} x)
-(LEAQ4 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(off+scale*4) ->
-	(LEAQ [off+scale*4] {sym} x)
-(LEAQ8 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(off+scale*8) ->
-	(LEAQ [off+scale*8] {sym} x)
+(LEAQ2 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*2) =>
+	(LEAQ [off+int32(scale)*2] {sym} x)
+(LEAQ4 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*4) =>
+	(LEAQ [off+int32(scale)*4] {sym} x)
+(LEAQ8 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*8) =>
+	(LEAQ [off+int32(scale)*8] {sym} x)
 
 // Absorb InvertFlags into branches.
 (LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index f4bb7f5671..b50c8c3496 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -1697,24 +1697,24 @@ func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
 		return true
 	}
 	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ADDLconstmodify)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -1749,25 +1749,25 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
 		return true
 	}
 	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (ADDLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ADDLload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -1820,25 +1820,25 @@ func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
 		return true
 	}
 	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (ADDLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ADDLmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -2313,24 +2313,24 @@ func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
 		return true
 	}
 	// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ADDQconstmodify)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -2365,25 +2365,25 @@ func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
 		return true
 	}
 	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (ADDQload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ADDQload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -2436,25 +2436,25 @@ func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
 		return true
 	}
 	// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (ADDQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ADDQmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -2519,25 +2519,25 @@ func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
 		return true
 	}
 	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (ADDSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ADDSDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -2622,25 +2622,25 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
 		return true
 	}
 	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (ADDSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ADDSSload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -2894,24 +2894,24 @@ func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
 		return true
 	}
 	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ANDLconstmodify)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -2946,25 +2946,25 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
 		return true
 	}
 	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (ANDLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ANDLload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -3017,25 +3017,25 @@ func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
 		return true
 	}
 	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (ANDLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ANDLmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -3273,24 +3273,24 @@ func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
 		return true
 	}
 	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ANDQconstmodify)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -3325,25 +3325,25 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
 		return true
 	}
 	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (ANDQload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ANDQload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -3396,25 +3396,25 @@ func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
 		return true
 	}
 	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (ANDQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64ANDQmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -3538,24 +3538,24 @@ func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool {
 		return true
 	}
 	// match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (BTCLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64BTCLconstmodify)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -3588,25 +3588,25 @@ func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool {
 		return true
 	}
 	// match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (BTCLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64BTCLmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -3689,24 +3689,24 @@ func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool {
 		return true
 	}
 	// match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (BTCQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64BTCQconstmodify)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -3739,25 +3739,25 @@ func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool {
 		return true
 	}
 	// match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (BTCQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64BTCQmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -4022,24 +4022,24 @@ func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool {
 		return true
 	}
 	// match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (BTRLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64BTRLconstmodify)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -4072,25 +4072,25 @@ func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool {
 		return true
 	}
 	// match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (BTRLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64BTRLmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -4199,24 +4199,24 @@ func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool {
 		return true
 	}
 	// match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (BTRQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64BTRQconstmodify)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -4249,25 +4249,25 @@ func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool {
 		return true
 	}
 	// match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (BTRQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64BTRQmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -4368,24 +4368,24 @@ func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool {
 		return true
 	}
 	// match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (BTSLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64BTSLconstmodify)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -4418,25 +4418,25 @@ func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool {
 		return true
 	}
 	// match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (BTSLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64BTSLmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -4545,24 +4545,24 @@ func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool {
 		return true
 	}
 	// match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (BTSQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64BTSQconstmodify)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -4595,25 +4595,25 @@ func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool {
 		return true
 	}
 	// match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (BTSQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64BTSQmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -7003,24 +7003,24 @@ func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
 		return true
 	}
 	// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64CMPBconstload)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -7053,25 +7053,25 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
 		return true
 	}
 	// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (CMPBload [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64CMPBload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -7404,24 +7404,24 @@ func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
 		return true
 	}
 	// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64CMPLconstload)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -7454,25 +7454,25 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
 		return true
 	}
 	// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (CMPLload [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64CMPLload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -7985,24 +7985,24 @@ func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
 		return true
 	}
 	// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64CMPQconstload)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -8035,25 +8035,25 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
 		return true
 	}
 	// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (CMPQload [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64CMPQload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -8371,24 +8371,24 @@ func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
 		return true
 	}
 	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64CMPWconstload)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -8421,25 +8421,25 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
 		return true
 	}
 	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (CMPWload [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64CMPWload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -8580,25 +8580,25 @@ func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
 		return true
 	}
 	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (DIVSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64DIVSDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -8658,25 +8658,25 @@ func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
 		return true
 	}
 	// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (DIVSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64DIVSSload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -9128,111 +9128,111 @@ func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
 		break
 	}
 	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (LEAQ [off1+off2] {mergeSymTyped(sym1,sym2)} x)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		x := v_0.Args[0]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64LEAQ)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg(x)
 		return true
 	}
 	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (LEAQ1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ1 {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		y := v_0.Args[1]
 		x := v_0.Args[0]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(x, y)
 		return true
 	}
 	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (LEAQ2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ2 {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		y := v_0.Args[1]
 		x := v_0.Args[0]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64LEAQ2)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(x, y)
 		return true
 	}
 	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (LEAQ4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ4 {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		y := v_0.Args[1]
 		x := v_0.Args[0]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64LEAQ4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(x, y)
 		return true
 	}
 	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (LEAQ8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ8 {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		y := v_0.Args[1]
 		x := v_0.Args[0]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64LEAQ8)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(x, y)
 		return true
 	}
@@ -9323,68 +9323,68 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
 		break
 	}
 	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+	// result: (LEAQ1 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			if v_0.Op != OpAMD64LEAQ {
 				continue
 			}
-			off2 := v_0.AuxInt
-			sym2 := v_0.Aux
+			off2 := auxIntToInt32(v_0.AuxInt)
+			sym2 := auxToSym(v_0.Aux)
 			x := v_0.Args[0]
 			y := v_1
-			if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+			if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
 				continue
 			}
 			v.reset(OpAMD64LEAQ1)
-			v.AuxInt = off1 + off2
-			v.Aux = mergeSym(sym1, sym2)
+			v.AuxInt = int32ToAuxInt(off1 + off2)
+			v.Aux = symToAux(mergeSymTyped(sym1, sym2))
			v.AddArg2(x, y)
 			return true
 		}
 		break
 	}
 	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (LEAQ2 [off1+off2] {mergeSymTyped(sym1, sym2)} x y)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
 			if v_1.Op != OpAMD64LEAQ1 {
 				continue
 			}
-			off2 := v_1.AuxInt
-			sym2 := v_1.Aux
+			off2 := auxIntToInt32(v_1.AuxInt)
+			sym2 := auxToSym(v_1.Aux)
 			y := v_1.Args[1]
-			if y != v_1.Args[0] || !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 				continue
 			}
 			v.reset(OpAMD64LEAQ2)
-			v.AuxInt = off1 + off2
-			v.Aux = mergeSym(sym1, sym2)
+			v.AuxInt = int32ToAuxInt(off1 + off2)
+			v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 			v.AddArg2(x, y)
 			return true
 		}
 		break
 	}
 	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (LEAQ2 [off1+off2] {mergeSymTyped(sym1, sym2)} y x)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			x := v_0
 			if v_1.Op != OpAMD64LEAQ1 {
 				continue
 			}
-			off2 := v_1.AuxInt
-			sym2 := v_1.Aux
+			off2 := auxIntToInt32(v_1.AuxInt)
+			sym2 := auxToSym(v_1.Aux)
 			_ = v_1.Args[1]
 			v_1_0 := v_1.Args[0]
 			v_1_1 := v_1.Args[1]
@@ -9393,12 +9393,12 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
 				continue
 			}
 			y := v_1_1
-			if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+			if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 				continue
 			}
 			v.reset(OpAMD64LEAQ2)
-			v.AuxInt = off1 + off2
-			v.Aux = mergeSym(sym1, sym2)
+			v.AuxInt = int32ToAuxInt(off1 + off2)
+			v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 			v.AddArg2(y, x)
 			return true
 		}
@@ -9501,86 +9501,86 @@ func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
 		return true
 	}
 	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+	// result: (LEAQ2 [off1+off2] {mergeSymTyped(sym1,sym2)} x y)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		x := v_0.Args[0]
 		y := v_1
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
 			break
 		}
 		v.reset(OpAMD64LEAQ2)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(x, y)
 		return true
 	}
 	// match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
-	// cond: is32Bit(off1+2*off2) && sym2 == nil
+	// cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
 	// result: (LEAQ4 [off1+2*off2] {sym1} x y)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+
off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64LEAQ1 { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) y := v_1.Args[1] - if y != v_1.Args[0] || !(is32Bit(off1+2*off2) && sym2 == nil) { + if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) { break } v.reset(OpAMD64LEAQ4) - v.AuxInt = off1 + 2*off2 - v.Aux = sym1 + v.AuxInt = int32ToAuxInt(off1 + 2*off2) + v.Aux = symToAux(sym1) v.AddArg2(x, y) return true } // match: (LEAQ2 [off] {sym} x (MOVQconst [scale])) - // cond: is32Bit(off+scale*2) - // result: (LEAQ [off+scale*2] {sym} x) + // cond: is32Bit(int64(off)+int64(scale)*2) + // result: (LEAQ [off+int32(scale)*2] {sym} x) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - scale := v_1.AuxInt - if !(is32Bit(off + scale*2)) { + scale := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(int64(off) + int64(scale)*2)) { break } v.reset(OpAMD64LEAQ) - v.AuxInt = off + scale*2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off + int32(scale)*2) + v.Aux = symToAux(sym) v.AddArg(x) return true } // match: (LEAQ2 [off] {sym} x (MOVLconst [scale])) - // cond: is32Bit(off+scale*2) - // result: (LEAQ [off+scale*2] {sym} x) + // cond: is32Bit(int64(off)+int64(scale)*2) + // result: (LEAQ [off+int32(scale)*2] {sym} x) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - scale := v_1.AuxInt - if !(is32Bit(off + scale*2)) { + scale := auxIntToInt32(v_1.AuxInt) + if !(is32Bit(int64(off) + int64(scale)*2)) { break } v.reset(OpAMD64LEAQ) - v.AuxInt = off + scale*2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off + int32(scale)*2) + v.Aux = symToAux(sym) v.AddArg(x) return true } @@ -9648,86 +9648,86 @@ func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool { return true } // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB - // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAQ4 [off1+off2] {mergeSymTyped(sym1,sym2)} x y) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) x := v_0.Args[0] y := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } v.reset(OpAMD64LEAQ4) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(x, y) return true } // match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) - // cond: is32Bit(off1+4*off2) && sym2 == nil + // cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil // result: (LEAQ8 [off1+4*off2] {sym1} x y) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64LEAQ1 { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) y := v_1.Args[1] - if y != v_1.Args[0] || !(is32Bit(off1+4*off2) && sym2 == nil) { + if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && 
sym2 == nil) { break } v.reset(OpAMD64LEAQ8) - v.AuxInt = off1 + 4*off2 - v.Aux = sym1 + v.AuxInt = int32ToAuxInt(off1 + 4*off2) + v.Aux = symToAux(sym1) v.AddArg2(x, y) return true } // match: (LEAQ4 [off] {sym} x (MOVQconst [scale])) - // cond: is32Bit(off+scale*4) - // result: (LEAQ [off+scale*4] {sym} x) + // cond: is32Bit(int64(off)+int64(scale)*4) + // result: (LEAQ [off+int32(scale)*4] {sym} x) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - scale := v_1.AuxInt - if !(is32Bit(off + scale*4)) { + scale := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(int64(off) + int64(scale)*4)) { break } v.reset(OpAMD64LEAQ) - v.AuxInt = off + scale*4 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off + int32(scale)*4) + v.Aux = symToAux(sym) v.AddArg(x) return true } // match: (LEAQ4 [off] {sym} x (MOVLconst [scale])) - // cond: is32Bit(off+scale*4) - // result: (LEAQ [off+scale*4] {sym} x) + // cond: is32Bit(int64(off)+int64(scale)*4) + // result: (LEAQ [off+int32(scale)*4] {sym} x) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - scale := v_1.AuxInt - if !(is32Bit(off + scale*4)) { + scale := auxIntToInt32(v_1.AuxInt) + if !(is32Bit(int64(off) + int64(scale)*4)) { break } v.reset(OpAMD64LEAQ) - v.AuxInt = off + scale*4 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off + int32(scale)*4) + v.Aux = symToAux(sym) v.AddArg(x) return true } @@ -9779,64 +9779,64 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool { return true } // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB - // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAQ8 [off1+off2] {mergeSymTyped(sym1,sym2)} x y) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) x := v_0.Args[0] y := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } v.reset(OpAMD64LEAQ8) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(x, y) return true } // match: (LEAQ8 [off] {sym} x (MOVQconst [scale])) - // cond: is32Bit(off+scale*8) - // result: (LEAQ [off+scale*8] {sym} x) + // cond: is32Bit(int64(off)+int64(scale)*8) + // result: (LEAQ [off+int32(scale)*8] {sym} x) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64MOVQconst { break } - scale := v_1.AuxInt - if !(is32Bit(off + scale*8)) { + scale := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(int64(off) + int64(scale)*8)) { break } v.reset(OpAMD64LEAQ) - v.AuxInt = off + scale*8 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off + int32(scale)*8) + v.Aux = symToAux(sym) v.AddArg(x) return true } // match: (LEAQ8 [off] {sym} x (MOVLconst [scale])) - // cond: is32Bit(off+scale*8) - // result: (LEAQ [off+scale*8] {sym} x) + // cond: is32Bit(int64(off)+int64(scale)*8) + // result: (LEAQ [off+int32(scale)*8] {sym} x) for { - off := v.AuxInt - sym := v.Aux + off := 
auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 if v_1.Op != OpAMD64MOVLconst { break } - scale := v_1.AuxInt - if !(is32Bit(off + scale*8)) { + scale := auxIntToInt32(v_1.AuxInt) + if !(is32Bit(int64(off) + int64(scale)*8)) { break } v.reset(OpAMD64LEAQ) - v.AuxInt = off + scale*8 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off + int32(scale)*8) + v.Aux = symToAux(sym) v.AddArg(x) return true } @@ -9992,24 +9992,24 @@ func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool { return true } // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBQSXload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVBQSXload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } @@ -10241,24 +10241,24 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { return true } // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVBload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } @@ -10637,25 +10637,25 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { return true } // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVBstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } 
@@ -11531,24 +11531,24 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { return true } // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) + // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) for { - sc := v.AuxInt - sym1 := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off := v_0.AuxInt - sym2 := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { break } v.reset(OpAMD64MOVBstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -11773,24 +11773,24 @@ func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool { return true } // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVLQSXload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVLQSXload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } @@ -12052,24 +12052,24 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { return true } // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVLload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVLload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } @@ -12245,25 +12245,25 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { return true } // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && 
canMergeSym(sym1, sym2) + // result: (MOVLstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -13029,24 +13029,24 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { return true } // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) + // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) for { - sc := v.AuxInt - sym1 := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off := v_0.AuxInt - sym2 := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { break } v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -13174,24 +13174,24 @@ func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool { return true } // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVOload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVOload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } @@ -13227,25 +13227,25 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { return true } // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVOstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := 
auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVOstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -13432,24 +13432,24 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { return true } // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVQload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVQload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } @@ -13576,25 +13576,25 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { return true } // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVQstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -14210,24 +14210,24 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { return true } // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) + // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) for { - sc := v.AuxInt - sym1 := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off := v_0.AuxInt - sym2 := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { break } v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + 
v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -14328,24 +14328,24 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { return true } // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVSDload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVSDload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } @@ -14395,25 +14395,25 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { return true } // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVSDstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVSDstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -14461,24 +14461,24 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { return true } // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVSSload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVSSload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } @@ -14528,25 +14528,25 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { return true } // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: 
is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVSSstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVSSstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -14707,24 +14707,24 @@ func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool { return true } // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVWQSXload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVWQSXload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } @@ -14898,24 +14898,24 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { return true } // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } @@ -15074,25 +15074,25 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { return true } // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := 
v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -15427,24 +15427,24 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { return true } // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) + // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) for { - sc := v.AuxInt - sym1 := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off := v_0.AuxInt - sym2 := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) { break } v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -16505,25 +16505,25 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool { return true } // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MULSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64LEAQ { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MULSDload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(val, base, mem) return true } @@ -16608,25 +16608,25 @@ func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool { return true } // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MULSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64LEAQ { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MULSSload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = 
symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(val, base, mem) return true } @@ -18259,24 +18259,24 @@ func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool { return true } // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) - // result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) + // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem) for { - valoff1 := v.AuxInt - sym1 := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { + if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64ORLconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } @@ -18311,25 +18311,25 @@ func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool { return true } // match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ORLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64LEAQ { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64ORLload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(val, base, mem) return true } @@ -18382,25 +18382,25 @@ func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool { return true } // match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ORLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64ORLmodify) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -19886,24 +19886,24 @@ func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool { return true } // match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] 
{sym2} base) mem) - // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) - // result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) + // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) + // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem) for { - valoff1 := v.AuxInt - sym1 := v.Aux + valoff1 := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) { + if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64ORQconstmodify) - v.AuxInt = ValAndOff(valoff1).add(off2) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } @@ -19938,25 +19938,25 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool { return true } // match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ORQload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64LEAQ { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) base := v_1.Args[0] mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64ORQload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(val, base, mem) return true } @@ -20009,25 +20009,25 @@ func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool { return true } // match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (ORQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64ORQmodify) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -21537,25 +21537,25 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { return true } // match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: 
(SETAEstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64SETAEstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -21697,25 +21697,25 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { return true } // match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETAstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64SETAstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -22067,25 +22067,25 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { return true } // match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETBEstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64SETBEstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -22227,25 +22227,25 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { return true } // match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - 
if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64SETBstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -23219,25 +23219,25 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { return true } // match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETEQstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64SETEQstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -23509,25 +23509,25 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { return true } // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETGEstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64SETGEstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -23669,25 +23669,25 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { return true } // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETGstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64SETGstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ 
-23959,25 +23959,25 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { return true } // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETLEstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64SETLEstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -24119,25 +24119,25 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { return true } // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETLstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64SETLstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -25135,25 +25135,25 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { return true } // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (SETNEstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64SETNEstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } @@ -26460,25 +26460,25 @@ func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool { return true } // match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + // cond: is32Bit(int64(off1)+int64(off2)) && 
canMergeSym(sym1, sym2)
+	// result: (SUBLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64SUBLload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -26531,25 +26531,25 @@ func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
 		return true
 	}
 	// match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (SUBLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64SUBLmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -26739,25 +26739,25 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
 		return true
 	}
 	// match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (SUBQload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64SUBQload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -26810,25 +26810,25 @@ func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
 		return true
 	}
 	// match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (SUBQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64SUBQmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -26890,25 +26890,25 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
 		return true
 	}
 	// match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (SUBSDload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64SUBSDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -26990,25 +26990,25 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
 		return true
 	}
 	// match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (SUBSSload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64SUBSSload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -27909,24 +27909,24 @@ func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
 		return true
 	}
 	// match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64XORLconstmodify)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -27961,25 +27961,25 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
 		return true
 	}
 	// match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (XORLload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64XORLload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -28032,25 +28032,25 @@ func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
 		return true
 	}
 	// match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (XORLmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64XORLmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
@@ -28277,24 +28277,24 @@ func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
 		return true
 	}
 	// match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
-	// result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSymTyped(sym1,sym2)} base mem)
 	for {
-		valoff1 := v.AuxInt
-		sym1 := v.Aux
+		valoff1 := auxIntToValAndOff(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		mem := v_1
-		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64XORQconstmodify)
-		v.AuxInt = ValAndOff(valoff1).add(off2)
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg2(base, mem)
 		return true
 	}
@@ -28329,25 +28329,25 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
 		return true
 	}
 	// match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (XORQload [off1+off2] {mergeSymTyped(sym1,sym2)} val base mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		val := v_0
 		if v_1.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
+		off2 := auxIntToInt32(v_1.AuxInt)
+		sym2 := auxToSym(v_1.Aux)
 		base := v_1.Args[0]
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64XORQload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(val, base, mem)
 		return true
 	}
@@ -28400,25 +28400,25 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
 		return true
 	}
 	// match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+	// result: (XORQmodify [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem)
 	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
+		off1 := auxIntToInt32(v.AuxInt)
+		sym1 := auxToSym(v.Aux)
 		if v_0.Op != OpAMD64LEAQ {
 			break
 		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
+		off2 := auxIntToInt32(v_0.AuxInt)
+		sym2 := auxToSym(v_0.Aux)
 		base := v_0.Args[0]
 		val := v_1
 		mem := v_2
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
 			break
 		}
 		v.reset(OpAMD64XORQmodify)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
+		v.AuxInt = int32ToAuxInt(off1 + off2)
+		v.Aux = symToAux(mergeSymTyped(sym1, sym2))
 		v.AddArg3(base, val, mem)
 		return true
 	}
-- 
2.50.0
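Every hunk above applies the same typed-aux pattern: offsets become int32 values read through auxIntToInt32 and written back through int32ToAuxInt, symbols go through auxToSym/symToAux, and the fold condition widens both offsets to int64 before calling is32Bit, because the sum of two in-range int32 offsets can overflow int32. The previous untyped rules summed int64 AuxInt values directly, so the explicit widening is new with the typed locals. The ValAndOff hunks apply the same idea via canAdd32/addOffset32 on aux values that pack a constant and an offset into one int64. Below is a minimal standalone sketch of the overflow check, not compiler code: the reduced Value struct, the helper bodies, and the main function are illustrative assumptions that only mirror the names used in the diff.

package main

import (
	"fmt"
	"math"
)

// Value is a hypothetical stand-in for ssa.Value, reduced to the one
// field these rewrite rules touch.
type Value struct {
	AuxInt int64
}

// auxIntToInt32 and int32ToAuxInt model the typed accessors the rules now
// go through: every read and write of AuxInt pins down the intended width.
func auxIntToInt32(i int64) int32 { return int32(i) }
func int32ToAuxInt(i int32) int64 { return int64(i) }

// is32Bit reports whether n fits in a signed 32-bit integer; the rules
// call it before folding two offsets into one.
func is32Bit(n int64) bool { return n == int64(int32(n)) }

func main() {
	v := &Value{AuxInt: math.MaxInt32 - 4}
	off1 := auxIntToInt32(v.AuxInt)
	off2 := int32(16)

	// Widening before the check is the point of the
	// is32Bit(int64(off1)+int64(off2)) spelling: summing in int32 would
	// wrap silently and the guard would test a bogus value.
	if is32Bit(int64(off1) + int64(off2)) {
		v.AuxInt = int32ToAuxInt(off1 + off2)
	} else {
		fmt.Println("offsets do not fold: sum overflows int32")
	}
}

Run standalone, this takes the refusal branch (2147483643 + 16 exceeds MaxInt32), which is exactly the case the widened condition exists to reject.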