(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= m && int8(m) < n => (FlagLT_ULT)
// TESTQ c c sets flags like CMPQ c 0.
-(TEST(Q|L)const [c] (MOV(Q|L)const [c])) && c == 0 -> (FlagEQ)
-(TEST(Q|L)const [c] (MOV(Q|L)const [c])) && c < 0 -> (FlagLT_UGT)
-(TEST(Q|L)const [c] (MOV(Q|L)const [c])) && c > 0 -> (FlagGT_UGT)
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ)
+(TESTLconst [c] (MOVLconst [c])) && c == 0 => (FlagEQ)
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c < 0 => (FlagLT_UGT)
+(TESTLconst [c] (MOVLconst [c])) && c < 0 => (FlagLT_UGT)
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c > 0 => (FlagGT_UGT)
+(TESTLconst [c] (MOVLconst [c])) && c > 0 => (FlagGT_UGT)
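Note that the Q and L cases can no longer share one pattern: with typed AuxInt, TESTQconst carries an int32 constant while MOVQconst carries an int64, so the Q rules bind the two constants separately and equate them in the condition. A minimal sketch (not part of the patch) of the flag identity the comment above describes:

// flagsOfTest shows why TEST c, c classifies c exactly like CMP c, 0:
// c AND c is just c, so the zero and sign flags are computed from c itself.
func flagsOfTest(c int64) (zero, negative bool) {
	and := c & c             // the value whose flags TESTQ sets
	return and == 0, and < 0 // identical to c == 0 and c < 0
}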
// TODO: DIVxU also.
// If possible, put a rematerializeable value in the first argument slot,
// to reduce the odds that another value will have to be spilled
// specifically to free up AX.
-(HMUL(Q|L) x y) && !x.rematerializeable() && y.rematerializeable() -> (HMUL(Q|L) y x)
-(HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() -> (HMUL(Q|L)U y x)
+(HMUL(Q|L) x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L) y x)
+(HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)U y x)
// Fold loads into compares
// Note: these may be undone by the flagalloc pass.
-(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) -> (CMP(Q|L|W|B)load {sym} [off] ptr x mem)
-(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) -> (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem))
+(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(Q|L|W|B)load {sym} [off] ptr x mem)
+(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem))
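As a hypothetical illustration (not part of the patch), this is the shape of Go code the fold targets; the load of *p can become a memory operand of the compare, assuming canMergeLoad permits the merge:

// cmp may compile to a single CMPQload (compare against a memory operand)
// rather than MOVQload followed by CMPQ, when the rule above fires.
func cmp(p *int64, x int64) bool {
	return *p == x
}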
(CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c])
&& l.Uses == 1
&& clobber(l) =>
  @l.Block (CMP(Q|L)constload {sym} [makeValAndOff32(c,off)] ptr mem)
-(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validValAndOff(c,off) -> (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
-(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(c,off) -> (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
-(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),off) -> (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
-(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),off) -> (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
+(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validValAndOff(c,int64(off)) => (CMPQconstload {sym} [makeValAndOff64(c,int64(off))] ptr mem)
+(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(c),int64(off)) => (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
+(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),int64(off)) => (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem)
+(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),int64(off)) => (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem)
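The makeValAndOff32/makeValAndOff64 helpers pack the comparison value and the load offset into a single AuxInt. A sketch of the encoding, assuming the ssa package's standard ValAndOff layout (value in the high 32 bits, offset in the low 32; validValAndOff checks that both fit):

// makeValAndOffSketch mirrors the ValAndOff packing used by the
// CMP*constload ops: value in the high half, offset in the low half.
func makeValAndOffSketch(val, off int32) int64 {
	return int64(val)<<32 | int64(uint32(off))
}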
(TEST(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2)
&& l == l2
&& l.Uses == 2
- && validValAndOff(0,off)
- && clobber(l) ->
- @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0,off)] ptr mem)
-
-(MOVBload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read8(sym, off))])
-(MOVWload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
-(MOVLload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVQconst [int64(read32(sym, off, config.ctxt.Arch.ByteOrder))])
-(MOVQload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVQconst [int64(read64(sym, off, config.ctxt.Arch.ByteOrder))])
-(MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) ->
- (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, srcOff+8, config.ctxt.Arch.ByteOrder))])
- (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, srcOff, config.ctxt.Arch.ByteOrder))]) mem))
+ && validValAndOff(0, int64(off))
+ && clobber(l) =>
+ @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
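A hypothetical example (not from the patch) of source that exercises this rule: a non-zero test of a loaded value, where the load feeds both TEST operands:

// nz may compile to a single CMPQconstload against zero instead of a
// MOVQload followed by TESTQ, when the rule above fires.
func nz(p *int64) bool {
	return *p != 0
}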
+
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) =>
+ (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))])
+ (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
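These rules constant-fold loads from read-only symbols at build time. A sketch of what a read16-style helper presumably does (assumed semantics; the real read8/read16/read32/read64 helpers live in the ssa package and read the symbol's data):

import "encoding/binary"

// read16Sketch interprets two bytes of a symbol's read-only data in the
// target's byte order, yielding the value the rewrite bakes into a MOVLconst.
func read16Sketch(data []byte, off int64, order binary.ByteOrder) uint16 {
	return order.Uint16(data[off : off+2])
}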
if l.Op != OpAMD64MOVBload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
x := v_1
if !(canMergeLoad(v, l) && clobber(l)) {
	break
}
v.reset(OpAMD64CMPBload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
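For reference, the typed-aux accessors used throughout the generated hunks below are thin conversions; a simplified sketch of their shape as defined in rewrite.go (Sym and ValAndOff are the ssa package's own types):

func auxIntToInt32(i int64) int32         { return int32(i) }
func auxIntToInt64(i int64) int64         { return i }
func int32ToAuxInt(i int32) int64         { return int64(i) }
func int64ToAuxInt(i int64) int64         { return i }
func valAndOffToAuxInt(v ValAndOff) int64 { return int64(v) }
func auxToSym(i interface{}) Sym          { s, _ := i.(Sym); return s }
func symToAux(s Sym) interface{}          { return s }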
if l.Op != OpAMD64MOVBload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoad(v, l) && clobber(l)) {
	break
}
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg3(ptr, x, mem)
v.AddArg(v0)
return true
}
// match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
- // cond: validValAndOff(int64(int8(c)),off)
- // result: (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
+ // cond: validValAndOff(int64(int8(c)),int64(off))
+ // result: (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
mem := v_2
- if !(validValAndOff(int64(int8(c)), off)) {
+ if !(validValAndOff(int64(int8(c)), int64(off))) {
break
}
v.reset(OpAMD64CMPBconstload)
- v.AuxInt = makeValAndOff(int64(int8(c)), off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
if l.Op != OpAMD64MOVLload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
x := v_1
if !(canMergeLoad(v, l) && clobber(l)) {
	break
}
v.reset(OpAMD64CMPLload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
if l.Op != OpAMD64MOVLload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoad(v, l) && clobber(l)) {
	break
}
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg3(ptr, x, mem)
v.AddArg(v0)
return true
}
// match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
- // cond: validValAndOff(c,off)
- // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: validValAndOff(int64(c),int64(off))
+ // result: (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
mem := v_2
- if !(validValAndOff(c, off)) {
+ if !(validValAndOff(int64(c), int64(off))) {
break
}
v.reset(OpAMD64CMPLconstload)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
if l.Op != OpAMD64MOVQload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
x := v_1
if !(canMergeLoad(v, l) && clobber(l)) {
	break
}
v.reset(OpAMD64CMPQload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
if l.Op != OpAMD64MOVQload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoad(v, l) && clobber(l)) {
	break
}
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg3(ptr, x, mem)
v.AddArg(v0)
return true
}
// match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
- // cond: validValAndOff(c,off)
- // result: (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
+ // cond: validValAndOff(c,int64(off))
+ // result: (CMPQconstload {sym} [makeValAndOff64(c,int64(off))] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVQconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt64(v_1.AuxInt)
mem := v_2
- if !(validValAndOff(c, off)) {
+ if !(validValAndOff(c, int64(off))) {
break
}
v.reset(OpAMD64CMPQconstload)
- v.AuxInt = makeValAndOff(c, off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff64(c, int64(off)))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
if l.Op != OpAMD64MOVWload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
x := v_1
if !(canMergeLoad(v, l) && clobber(l)) {
	break
}
v.reset(OpAMD64CMPWload)
- v.AuxInt = off
- v.Aux = sym
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
v.AddArg3(ptr, x, mem)
return true
}
if l.Op != OpAMD64MOVWload {
break
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
if !(canMergeLoad(v, l) && clobber(l)) {
	break
}
v.reset(OpAMD64InvertFlags)
v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
- v0.AuxInt = off
- v0.Aux = sym
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
v0.AddArg3(ptr, x, mem)
v.AddArg(v0)
return true
}
// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
- // cond: validValAndOff(int64(int16(c)),off)
- // result: (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
+ // cond: validValAndOff(int64(int16(c)),int64(off))
+ // result: (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem)
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVLconst {
break
}
- c := v_1.AuxInt
+ c := auxIntToInt32(v_1.AuxInt)
mem := v_2
- if !(validValAndOff(int64(int16(c)), off)) {
+ if !(validValAndOff(int64(int16(c)), int64(off))) {
break
}
v.reset(OpAMD64CMPWconstload)
- v.AuxInt = makeValAndOff(int64(int16(c)), off)
- v.Aux = sym
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
v.AddArg2(ptr, mem)
return true
}
}
// match: (MOVBload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVLconst [int64(read8(sym, off))])
+ // result: (MOVLconst [int32(read8(sym, int64(off)))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = int64(read8(sym, off))
+ v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
return true
}
return false
}
// match: (MOVLload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVQconst [int64(read32(sym, off, config.ctxt.Arch.ByteOrder))])
+ // result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpAMD64MOVQconst)
- v.AuxInt = int64(read32(sym, off, config.ctxt.Arch.ByteOrder))
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
}
// match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
// cond: symIsRO(srcSym)
- // result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, srcOff+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, srcOff, config.ctxt.Arch.ByteOrder))]) mem))
+ // result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
for {
- dstOff := v.AuxInt
- dstSym := v.Aux
+ dstOff := auxIntToInt32(v.AuxInt)
+ dstSym := auxToSym(v.Aux)
ptr := v_0
if v_1.Op != OpAMD64MOVOload {
break
}
- srcOff := v_1.AuxInt
- srcSym := v_1.Aux
+ srcOff := auxIntToInt32(v_1.AuxInt)
+ srcSym := auxToSym(v_1.Aux)
v_1_0 := v_1.Args[0]
if v_1_0.Op != OpSB {
	break
}
mem := v_2
if !(symIsRO(srcSym)) {
	break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = dstOff + 8
- v.Aux = dstSym
+ v.AuxInt = int32ToAuxInt(dstOff + 8)
+ v.Aux = symToAux(dstSym)
v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
- v0.AuxInt = int64(read64(srcSym, srcOff+8, config.ctxt.Arch.ByteOrder))
+ v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
- v1.AuxInt = dstOff
- v1.Aux = dstSym
+ v1.AuxInt = int32ToAuxInt(dstOff)
+ v1.Aux = symToAux(dstSym)
v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
- v2.AuxInt = int64(read64(srcSym, srcOff, config.ctxt.Arch.ByteOrder))
+ v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
v1.AddArg3(ptr, v2, mem)
v.AddArg3(ptr, v0, v1)
return true
}
// match: (MOVQload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVQconst [int64(read64(sym, off, config.ctxt.Arch.ByteOrder))])
+ // result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpAMD64MOVQconst)
- v.AuxInt = int64(read64(sym, off, config.ctxt.Arch.ByteOrder))
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
}
// match: (MOVWload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVLconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))])
+ // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
- off := v.AuxInt
- sym := v.Aux
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
v.reset(OpAMD64MOVLconst)
- v.AuxInt = int64(read16(sym, off, config.ctxt.Arch.ByteOrder))
+ v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
}
break
}
// match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
- // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem)
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
l := v_0
if l.Op != OpAMD64MOVBload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
l2 := v_1
- if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) {
continue
}
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
v.copyOf(v0)
- v0.AuxInt = makeValAndOff(0, off)
- v0.Aux = sym
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off)))
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
break
}
// match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
- // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem)
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
l := v_0
if l.Op != OpAMD64MOVLload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
l2 := v_1
- if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) {
continue
}
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
v.copyOf(v0)
- v0.AuxInt = makeValAndOff(0, off)
- v0.Aux = sym
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off)))
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
// cond: c == 0
// result: (FlagEQ)
for {
- c := v.AuxInt
- if v_0.Op != OpAMD64MOVLconst || v_0.AuxInt != c || !(c == 0) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
break
}
v.reset(OpAMD64FlagEQ)
// cond: c < 0
// result: (FlagLT_UGT)
for {
- c := v.AuxInt
- if v_0.Op != OpAMD64MOVLconst || v_0.AuxInt != c || !(c < 0) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
break
}
v.reset(OpAMD64FlagLT_UGT)
// cond: c > 0
// result: (FlagGT_UGT)
for {
- c := v.AuxInt
- if v_0.Op != OpAMD64MOVLconst || v_0.AuxInt != c || !(c > 0) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
break
}
v.reset(OpAMD64FlagGT_UGT)
break
}
// match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
- // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem)
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)
+ // result: @l.Block (CMPQconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
l := v_0
if l.Op != OpAMD64MOVQload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
l2 := v_1
- if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) {
continue
}
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
v.copyOf(v0)
- v0.AuxInt = makeValAndOff(0, off)
- v0.Aux = sym
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off)))
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}
}
func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (TESTQconst [c] (MOVQconst [c]))
- // cond: c == 0
+ // match: (TESTQconst [c] (MOVQconst [d]))
+ // cond: int64(c) == d && c == 0
// result: (FlagEQ)
for {
- c := v.AuxInt
- if v_0.Op != OpAMD64MOVQconst || v_0.AuxInt != c || !(c == 0) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(int64(c) == d && c == 0) {
break
}
v.reset(OpAMD64FlagEQ)
return true
}
- // match: (TESTQconst [c] (MOVQconst [c]))
- // cond: c < 0
+ // match: (TESTQconst [c] (MOVQconst [d]))
+ // cond: int64(c) == d && c < 0
// result: (FlagLT_UGT)
for {
- c := v.AuxInt
- if v_0.Op != OpAMD64MOVQconst || v_0.AuxInt != c || !(c < 0) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(int64(c) == d && c < 0) {
break
}
v.reset(OpAMD64FlagLT_UGT)
return true
}
- // match: (TESTQconst [c] (MOVQconst [c]))
- // cond: c > 0
+ // match: (TESTQconst [c] (MOVQconst [d]))
+ // cond: int64(c) == d && c > 0
// result: (FlagGT_UGT)
for {
- c := v.AuxInt
- if v_0.Op != OpAMD64MOVQconst || v_0.AuxInt != c || !(c > 0) {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(int64(c) == d && c > 0) {
break
}
v.reset(OpAMD64FlagGT_UGT)
break
}
// match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
- // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l)
- // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem)
+ // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
l := v_0
if l.Op != OpAMD64MOVWload {
continue
}
- off := l.AuxInt
- sym := l.Aux
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
mem := l.Args[1]
ptr := l.Args[0]
l2 := v_1
- if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
+ if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) {
continue
}
b = l.Block
v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
v.copyOf(v0)
- v0.AuxInt = makeValAndOff(0, off)
- v0.Aux = sym
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off)))
+ v0.Aux = symToAux(sym)
v0.AddArg2(ptr, mem)
return true
}