(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) -> (MOVDaddr [off1+off2] {sym} ptr)
// fold address into load/store
-// only small offset (between -256 and 256) or offset that is a multiple of data size
-// can be encoded in the instructions
-// since this rewriting takes place before stack allocation, the offset to SP is unknown,
-// so don't do it for args and locals with unaligned offset
-(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 1, sym) ->
(MOVBload [off1+off2] {sym} ptr mem)
-(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 1, sym) ->
(MOVBUload [off1+off2] {sym} ptr mem)
-(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 2, sym) ->
(MOVHload [off1+off2] {sym} ptr mem)
-(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 2, sym) ->
(MOVHUload [off1+off2] {sym} ptr mem)
-(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 4, sym) ->
(MOVWload [off1+off2] {sym} ptr mem)
-(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 4, sym) ->
(MOVWUload [off1+off2] {sym} ptr mem)
-(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 8, sym) ->
(MOVDload [off1+off2] {sym} ptr mem)
-(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 4, sym) ->
(FMOVSload [off1+off2] {sym} ptr mem)
-(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 8, sym) ->
(FMOVDload [off1+off2] {sym} ptr mem)
-(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) ->
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 1, sym) ->
(MOVBstore [off1+off2] {sym} ptr val mem)
-(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 2, sym) ->
(MOVHstore [off1+off2] {sym} ptr val mem)
-(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 4, sym) ->
(MOVWstore [off1+off2] {sym} ptr val mem)
-(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 8, sym) ->
(MOVDstore [off1+off2] {sym} ptr val mem)
-(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 4, sym) ->
(FMOVSstore [off1+off2] {sym} ptr val mem)
-(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && fitsARM64Offset(off1+off2, 8, sym) ->
(FMOVDstore [off1+off2] {sym} ptr val mem)
-(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 1, sym) ->
(MOVBstorezero [off1+off2] {sym} ptr mem)
-(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 2, sym) ->
(MOVHstorezero [off1+off2] {sym} ptr mem)
-(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 4, sym) ->
(MOVWstorezero [off1+off2] {sym} ptr mem)
-(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- && is32Bit(off1+off2) && !isArg(sym)
- && ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isAuto(sym)) ->
+(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && fitsARM64Offset(off1+off2, 8, sym) ->
(MOVDstorezero [off1+off2] {sym} ptr mem)
-(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) ->
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2)) ->
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) ->
+(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2)) ->
(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2)) ->
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2)) ->
(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) ->
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2)) ->
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2)) ->
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) ->
+(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2)) ->
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2)) ->
(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2)) ->
(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
- && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2))
- && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2))) ->
+(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2)) ->
(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
// store zero
y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem))
y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
&& i1 == i0+1
- && (i0%2 == 0 || i0<256 && i0>-256 && !isArg(s) && !isAuto(s))
+ && fitsARM64Offset(i0, 2, s)
&& x0.Uses == 1 && x1.Uses == 1
&& y0.Uses == 1 && y1.Uses == 1
&& mergePoint(b,x0,x1) != nil
}
func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool {
// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 8, sym)
// result: (FMOVDload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 8, sym)) {
break
}
v.reset(OpARM64FMOVDload)
return true
}
// match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))
// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64FMOVDload)
}
func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool {
// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 8, sym)
// result: (FMOVDstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 8, sym)) {
break
}
v.reset(OpARM64FMOVDstore)
return true
}
// match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))
// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64FMOVDstore)
}
func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool {
// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 4, sym)
// result: (FMOVSload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 4, sym)) {
break
}
v.reset(OpARM64FMOVSload)
return true
}
// match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))
// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64FMOVSload)
}
func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool {
// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 4, sym)
// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 4, sym)) {
break
}
v.reset(OpARM64FMOVSstore)
return true
}
// match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))
// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64FMOVSstore)
}
func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool {
// match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: fitsARM64Offset(off1+off2, 1, sym)
// result: (MOVBUload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1 + off2)) {
+ if !(fitsARM64Offset(off1+off2, 1, sym)) {
break
}
v.reset(OpARM64MOVBUload)
return true
}
// match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))
// result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVBUload)
}
func rewriteValueARM64_OpARM64MOVBload(v *Value) bool {
// match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: fitsARM64Offset(off1+off2, 1, sym)
// result: (MOVBload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1 + off2)) {
+ if !(fitsARM64Offset(off1+off2, 1, sym)) {
break
}
v.reset(OpARM64MOVBload)
return true
}
// match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))
// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVBload)
}
func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
// match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2)
+ // cond: fitsARM64Offset(off1+off2, 1, sym)
// result: (MOVBstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(is32Bit(off1 + off2)) {
+ if !(fitsARM64Offset(off1+off2, 1, sym)) {
break
}
v.reset(OpARM64MOVBstore)
return true
}
// match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVBstore)
}
func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool {
// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2)
+ // cond: fitsARM64Offset(off1+off2, 1, sym)
// result: (MOVBstorezero [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1 + off2)) {
+ if !(fitsARM64Offset(off1+off2, 1, sym)) {
break
}
v.reset(OpARM64MOVBstorezero)
return true
}
// match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))
// result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 1, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVBstorezero)
}
func rewriteValueARM64_OpARM64MOVDload(v *Value) bool {
// match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 8, sym)
// result: (MOVDload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 8, sym)) {
break
}
v.reset(OpARM64MOVDload)
return true
}
// match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))
// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVDload)
}
func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool {
// match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 8, sym)
// result: (MOVDstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 8, sym)) {
break
}
v.reset(OpARM64MOVDstore)
return true
}
// match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))
// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVDstore)
}
func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 8, sym)
// result: (MOVDstorezero [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 8 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 8, sym)) {
break
}
v.reset(OpARM64MOVDstorezero)
return true
}
// match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))
// result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 8, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVDstorezero)
}
func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool {
// match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 2, sym)
// result: (MOVHUload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 2, sym)) {
break
}
v.reset(OpARM64MOVHUload)
return true
}
// match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))
// result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVHUload)
}
func rewriteValueARM64_OpARM64MOVHload(v *Value) bool {
// match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 2, sym)
// result: (MOVHload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 2, sym)) {
break
}
v.reset(OpARM64MOVHload)
return true
}
// match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))
// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVHload)
}
func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
// match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 2, sym)
// result: (MOVHstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 2, sym)) {
break
}
v.reset(OpARM64MOVHstore)
return true
}
// match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))
// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVHstore)
}
func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 2, sym)
// result: (MOVHstorezero [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 2, sym)) {
break
}
v.reset(OpARM64MOVHstorezero)
return true
}
// match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))
// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 2, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVHstorezero)
}
func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool {
// match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 4, sym)
// result: (MOVWUload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 4, sym)) {
break
}
v.reset(OpARM64MOVWUload)
return true
}
// match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))
// result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVWUload)
}
func rewriteValueARM64_OpARM64MOVWload(v *Value) bool {
// match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 4, sym)
// result: (MOVWload [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 4, sym)) {
break
}
v.reset(OpARM64MOVWload)
return true
}
// match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVWload)
}
func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 4, sym)
// result: (MOVWstore [off1+off2] {sym} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 4, sym)) {
break
}
v.reset(OpARM64MOVWstore)
return true
}
// match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for {
off1 := v.AuxInt
ptr := v_0.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVWstore)
}
func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
- // cond: is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(sym))
+ // cond: fitsARM64Offset(off1+off2, 4, sym)
// result: (MOVWstorezero [off1+off2] {sym} ptr mem)
for {
off1 := v.AuxInt
off2 := v_0.AuxInt
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(is32Bit(off1+off2) && !isArg(sym) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(sym))) {
+ if !(fitsARM64Offset(off1+off2, 4, sym)) {
break
}
v.reset(OpARM64MOVWstorezero)
return true
}
// match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
- // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1,sym2)) && ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isAuto(mergeSym(sym1,sym2)))
+ // cond: canMergeSym(sym1,sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))
// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for {
off1 := v.AuxInt
sym2 := v_0.Aux
ptr := v_0.Args[0]
mem := v.Args[1]
- if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && !isArg(mergeSym(sym1, sym2)) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isAuto(mergeSym(sym1, sym2)))) {
+ if !(canMergeSym(sym1, sym2) && fitsARM64Offset(off1+off2, 4, mergeSym(sym1, sym2))) {
break
}
v.reset(OpARM64MOVWstorezero)
return true
}
// match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
- // cond: i1 == i0+1 && (i0%2 == 0 || i0<256 && i0>-256 && !isArg(s) && !isAuto(s)) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)
+ // cond: i1 == i0+1 && fitsARM64Offset(i0, 2, s) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)
// result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
for {
t := v.Type
if mem != x1.Args[1] {
break
}
- if !(i1 == i0+1 && (i0%2 == 0 || i0 < 256 && i0 > -256 && !isArg(s) && !isAuto(s)) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) {
+ if !(i1 == i0+1 && fitsARM64Offset(i0, 2, s) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) {
break
}
b = mergePoint(b, x0, x1)
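
For reference, the fitsARM64Offset helper that the rewritten conditions call is defined in rewrite.go and does not appear in this excerpt. The sketch below is reconstructed from the conditions it replaces and may differ from the actual definition; it assumes the existing is32Bit, isArg, and isAuto helpers from the same package. Centralizing the check also repairs the old MOVDstorezero condition, whose (off1+off2)%2==8 test could never be true, so 8-byte-aligned offsets outside the -256..256 range were never folded there.

// fitsARM64Offset reports whether an offset can be folded into an ARM64
// load/store of the given access size (align). Only a small offset
// (between -256 and 256) or an offset that is a multiple of the data size
// can be encoded in the instruction. Since this rewriting takes place
// before stack allocation, the offset to SP is unknown, so args and autos
// with possibly unaligned offsets are excluded.
// Sketch only: reconstructed from the replaced rule conditions.
func fitsARM64Offset(off, align int64, sym interface{}) bool {
	if !is32Bit(off) {
		return false
	}
	if align == 1 {
		// Byte-sized accesses accept any 32-bit offset.
		return true
	}
	return !isArg(sym) && (off%align == 0 || off < 256 && off > -256 && !isAuto(sym))
}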