(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
(MOVBUload [off1+off2] {sym} ptr mem)
(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(MOVHload [off1+off2] {sym} ptr mem)
(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(MOVHUload [off1+off2] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(MOVWload [off1+off2] {sym} ptr mem)
(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(MOVWUload [off1+off2] {sym} ptr mem)
(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(MOVDload [off1+off2] {sym} ptr mem)
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(FMOVSload [off1+off2] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(FMOVDload [off1+off2] {sym} ptr mem)
(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) ->
(MOVBstore [off1+off2] {sym} ptr val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(MOVHstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(MOVWstore [off1+off2] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(MOVDstore [off1+off2] {sym} ptr val mem)
(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(FMOVSstore [off1+off2] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(FMOVDstore [off1+off2] {sym} ptr val mem)
(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
(MOVBstorezero [off1+off2] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(MOVHstorezero [off1+off2] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(MOVWstorezero [off1+off2] {sym} ptr mem)
(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2)
- && ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
+ && is32Bit(off1+off2) && !isArg(sym)
+ && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
(MOVDstorezero [off1+off2] {sym} ptr mem)
(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is32Bit(off1+off2) ->
(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
&& is32Bit(off1+off2) ->
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is32Bit(off1+off2) ->
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2))) ->
+ && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
+ && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
// store zero
b := v.Block
_ = b
// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (FMOVDload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64FMOVDload)
return true
}
// match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64FMOVDload)
b := v.Block
_ = b
// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (FMOVDstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64FMOVDstore)
return true
}
// match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64FMOVDstore)
b := v.Block
_ = b
// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (FMOVSload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64FMOVSload)
return true
}
// match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64FMOVSload)
b := v.Block
_ = b
// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64FMOVSstore)
return true
}
// match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64FMOVSstore)
b := v.Block
_ = b
// match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVDload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64MOVDload)
return true
}
// match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64MOVDload)
b := v.Block
_ = b
// match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVDstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64MOVDstore)
return true
}
// match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64MOVDstore)
b := v.Block
_ = b
// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVDstorezero [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && ((off1+off2)%2 == 8 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64MOVDstorezero)
return true
}
// match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64MOVDstorezero)
b := v.Block
_ = b
// match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVHUload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64MOVHUload)
return true
}
// match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64MOVHUload)
b := v.Block
_ = b
// match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVHload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64MOVHload)
return true
}
// match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64MOVHload)
b := v.Block
_ = b
// match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVHstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64MOVHstore)
return true
}
// match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64MOVHstore)
b := v.Block
_ = b
// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVHstorezero [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64MOVHstorezero)
return true
}
// match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64MOVHstorezero)
b := v.Block
_ = b
// match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVWUload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64MOVWUload)
return true
}
// match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64MOVWUload)
b := v.Block
_ = b
// match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVWload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64MOVWload)
return true
}
// match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64MOVWload)
b := v.Block
_ = b
// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVWstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64MOVWstore)
return true
}
// match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64MOVWstore)
b := v.Block
_ = b
// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
+ // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
// result: (MOVWstorezero [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
+ if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
break
}
v.reset(OpARM64MOVWstorezero)
return true
}
// match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(mergeSym(sym1,sym2)) && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(mergeSym(sym1, sym2)) && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
break
}
v.reset(OpARM64MOVWstorezero)