(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y))) && c >= d => (ANDconst [d] y)
(ORN x (MOVDconst [-1])) => x
-(ADDconstForCarry [c] (MOVDconst [d])) && c < 0 && (int64(c) < 0 || int64(c) + d >= 0) => (FlagCarryClear)
-(ADDconstForCarry [c] (MOVDconst [d])) && c < 0 && int64(c) >= 0 && int64(c) + d < 0 => (FlagCarrySet)
+(ADDconstForCarry [c] (MOVDconst [d])) && c < 0 && (c < 0 || int64(c) + d >= 0) => (FlagCarryClear)
+(ADDconstForCarry [c] (MOVDconst [d])) && c < 0 && c >= 0 && int64(c) + d < 0 => (FlagCarrySet)
(MaskIfNotCarry (FlagCarrySet)) => (MOVDconst [0])
(MaskIfNotCarry (FlagCarryClear)) => (MOVDconst [-1])
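The only change to the carry rules above is dropping the int64() conversion around c where it is compared against zero on its own: c is bound from an int16 AuxInt (see auxIntToInt16 in the generated code below), so the comparison means the same thing either way, while the int64(c) + d term keeps its conversion because Go never mixes integer types implicitly. A minimal, standalone sketch of that distinction (variable names are illustrative, not from the compiler):

	package main

	import "fmt"

	func main() {
		var c int16 = -4 // stands in for the ADDconstForCarry AuxInt
		var d int64 = 100

		fmt.Println(c < 0)          // fine: int16 compared against an untyped constant
		fmt.Println(int64(c)+d < 0) // the explicit conversion is still required here
		// fmt.Println(c+d < 0)     // would not compile: mismatched types int16 and int64
	}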
// the temp register. So don't fold address of global, unless there
// is only one use.
(MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
(MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(FMOVSstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(FMOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVBZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVHZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
(MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(MOVWZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
(MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(FMOVSload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && (ptr.Op != OpSB || p.Uses == 1) =>
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
(FMOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
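Every MOVDaddr-folding rule above gains an is16Bit(int64(off1+off2)) guard so that the merged displacement still fits the signed 16-bit immediate of the D-form load/store encodings; the pre-existing (off1+off2)%4 == 0 conditions on MOVDload, MOVDstore and MOVWload correspond to the DS-form encodings, whose displacement must also be 4-byte aligned. Without the new guard, an oversized offset would force the assembler to rebuild the address with extra instructions through the temp register, which is exactly what the comment at the top of this block warns about. The guard is the existing helper in cmd/compile/internal/ssa/rewrite.go, which is (roughly, reproduced here only for reference) a round-trip check through int16:

	// is16Bit reports whether n fits in a signed 16-bit displacement.
	func is16Bit(n int64) bool {
		return n == int64(int16(n))
	}

So is16Bit(32767) holds and the fold happens, while is16Bit(32768) does not and the rule simply stops firing, leaving the MOVDaddr in place.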
// Fold offsets for loads.
// Fold symbols into storezero
(MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
- && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 ->
- (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0 =>
+ (MOVDstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
(MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
- && (x.Op != OpSB || p.Uses == 1) ->
- (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ && (x.Op != OpSB || p.Uses == 1) =>
+ (MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
(MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
- && (x.Op != OpSB || p.Uses == 1) ->
- (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ && (x.Op != OpSB || p.Uses == 1) =>
+ (MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
(MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
- && (x.Op != OpSB || p.Uses == 1) ->
- (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ && (x.Op != OpSB || p.Uses == 1) =>
+ (MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
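The storezero rules are also converted from the untyped form (-> with mergeSym) to the strongly typed form (=> with mergeSymTyped, the Sym-typed counterpart of mergeSym), which is what makes the corresponding generated code below read and write AuxInt/Aux through conversion helpers instead of raw values. Those helpers are thin wrappers defined in cmd/compile/internal/ssa/rewrite.go; the AuxInt pair used in this hunk is, for illustration, essentially:

	// Illustrative bodies of the AuxInt accessors referenced below; auxToSym
	// and symToAux play the same role for the {sym} slot.
	func auxIntToInt32(i int64) int32 { return int32(i) }
	func int32ToAuxInt(i int32) int64 { return int64(i) }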
// atomic intrinsics
(AtomicLoad(8|32|64|Ptr) ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
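The atomic rule uses the (8|32|64|Ptr) alternation, so this one line expands into four rules, one per width; the [1] AuxInt marks the lowered load as sequentially consistent, as opposed to the acquire-only AtomicLoadAcq lowerings that pass [0]. For orientation, the function rulegen produces for the 64-bit alternative looks roughly like this (treat it as a sketch; the generated rewritePPC64.go is authoritative):

	func rewriteValuePPC64_OpAtomicLoad64(v *Value) bool {
		v_1 := v.Args[1]
		v_0 := v.Args[0]
		// match: (AtomicLoad64 ptr mem)
		// result: (LoweredAtomicLoad64 [1] ptr mem)
		for {
			ptr := v_0
			mem := v_1
			v.reset(OpPPC64LoweredAtomicLoad64)
			v.AuxInt = int64ToAuxInt(1)
			v.AddArg2(ptr, mem)
			return true
		}
	}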
func rewriteValuePPC64_OpPPC64ADDconstForCarry(v *Value) bool {
v_0 := v.Args[0]
// match: (ADDconstForCarry [c] (MOVDconst [d]))
- // cond: c < 0 && (int64(c) < 0 || int64(c) + d >= 0)
+ // cond: c < 0 && (c < 0 || int64(c) + d >= 0)
// result: (FlagCarryClear)
for {
c := auxIntToInt16(v.AuxInt)
break
}
d := auxIntToInt64(v_0.AuxInt)
- if !(c < 0 && (int64(c) < 0 || int64(c)+d >= 0)) {
+ if !(c < 0 && (c < 0 || int64(c)+d >= 0)) {
break
}
v.reset(OpPPC64FlagCarryClear)
return true
}
// match: (ADDconstForCarry [c] (MOVDconst [d]))
- // cond: c < 0 && int64(c) >= 0 && int64(c) + d < 0
+ // cond: c < 0 && c >= 0 && int64(c) + d < 0
// result: (FlagCarrySet)
for {
c := auxIntToInt16(v.AuxInt)
break
}
d := auxIntToInt64(v_0.AuxInt)
- if !(c < 0 && int64(c) >= 0 && int64(c)+d < 0) {
+ if !(c < 0 && c >= 0 && int64(c)+d < 0) {
break
}
v.reset(OpPPC64FlagCarrySet)
return true
}
// match: (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (FMOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64FMOVDload)
return true
}
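In the generated code, the new is16Bit test simply joins the existing guard that already carries canMergeSym and the OpSB/Uses check, so each load/store case below changes in the same one place. For context, canMergeSym in rewrite.go only permits the fold when at most one of the two symbols is set; roughly (the exact parameter types may differ in this version of the file):

	// canMergeSym: a fold is legal only when at least one side carries no
	// symbol, so the merged result is just the other one.
	func canMergeSym(x, y interface{}) bool {
		return x == nil || y == nil
	}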
// match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (FMOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := p.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64FMOVDstore)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (FMOVSload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64FMOVSload)
return true
}
// match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (FMOVSstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := p.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64FMOVSstore)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVBZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVBZload)
return true
}
// match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := p.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVBstore)
}
// match: (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
// cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
- // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ // result: (MOVBstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
x := p.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVBstorezero)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, mem)
return true
}
return true
}
// match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
// result: (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDload)
return true
}
// match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
// result: (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := p.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
}
// match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
// cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
- // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ // result: (MOVDstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
x := p.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVDstorezero)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVHZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVHZload)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVHload)
return true
}
// match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := p.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVHstore)
}
// match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
// cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
- // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ // result: (MOVHstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
x := p.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVHstorezero)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, mem)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVWZload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVWZload)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0
// result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem)
for {
off1 := auxIntToInt32(v.AuxInt)
sym2 := auxToSym(p.Aux)
ptr := p.Args[0]
mem := v_1
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) && (off1+off2)%4 == 0) {
break
}
v.reset(OpPPC64MOVWload)
return true
}
// match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
// result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem)
for {
off1 := auxIntToInt32(v.AuxInt)
ptr := p.Args[0]
val := v_1
mem := v_2
- if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) {
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVWstore)
}
// match: (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
// cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
- // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ // result: (MOVWstorezero [off1+off2] {mergeSymTyped(sym1,sym2)} x mem)
for {
- off1 := v.AuxInt
- sym1 := v.Aux
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
p := v_0
if p.Op != OpPPC64MOVDaddr {
break
}
- off2 := p.AuxInt
- sym2 := p.Aux
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
x := p.Args[0]
mem := v_1
if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
break
}
v.reset(OpPPC64MOVWstorezero)
- v.AuxInt = off1 + off2
- v.Aux = mergeSym(sym1, sym2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSymTyped(sym1, sym2))
v.AddArg2(x, mem)
return true
}