FMOVQ 65520(R10), F10 // 4afdff3d
FMOVQ 64(RSP), F11 // eb13c03d
-// large aligned offset, use two instructions(add+ldr/store).
+// medium offsets that either fit in a single instruction or can use add+ldr/str
+ MOVD -4095(R17), R3 // 3bfe3fd1630340f9
+ MOVD -391(R17), R3 // 3b1e06d1630340f9
+ MOVD -257(R17), R3 // 3b0604d1630340f9
+ MOVD -256(R17), R3 // 230250f8
+ MOVD 255(R17), R3 // 23f24ff8
+ MOVD 256(R17), R3 // 238240f9
+ MOVD 257(R17), R3 // 3b060491630340f9
+ MOVD 391(R17), R3 // 3b1e0691630340f9
+ MOVD 4095(R17), R3 // 3bfe3f91630340f9
+
+ MOVD R0, -4095(R17) // 3bfe3fd1600300f9
+ MOVD R0, -391(R17) // 3b1e06d1600300f9
+ MOVD R0, -257(R17) // 3b0604d1600300f9
+ MOVD R0, -256(R17) // 200210f8
+ MOVD R0, 255(R17) // 20f20ff8
+ MOVD R0, 256(R17) // 208200f9
+ MOVD R0, 257(R17) // 3b060491600300f9
+ MOVD R0, 391(R17) // 3b1e0691600300f9
+ MOVD R0, 4095(R17) // 3bfe3f91600300f9
+ MOVD R0, 4096(R17) // 200208f9
+ MOVD R3, -4095(R17) // 3bfe3fd1630300f9
+ MOVD R3, -391(R17) // 3b1e06d1630300f9
+ MOVD R3, -257(R17) // 3b0604d1630300f9
+ MOVD R3, -256(R17) // 230210f8
+ MOVD R3, 255(R17) // 23f20ff8
+ MOVD R3, 256(R17) // 238200f9
+ MOVD R3, 257(R17) // 3b060491630300f9
+ MOVD R3, 391(R17) // 3b1e0691630300f9
+ MOVD R3, 4095(R17) // 3bfe3f91630300f9
+
+// large aligned offset, use two instructions (add+ldr/str).
MOVB R1, 0x1001(R2) // MOVB R1, 4097(R2) // 5b04409161070039
MOVB R1, 0xffffff(R2) // MOVB R1, 16777215(R2) // 5bfc7f9161ff3f39
MOVH R1, 0x2002(R2) // MOVH R1, 8194(R2) // 5b08409161070079
}
needsPool := true
+ if v >= -4095 && v <= 4095 {
+ needsPool = false
+ }
+
switch p.As {
case AMOVB, AMOVBU:
if cmp(C_UAUTO4K, lsc) || cmp(C_UOREG4K, lsc) {
o1 |= uint32(p.From.Reg&31)<<5 | uint32(p.To.Reg&31)
case 30: /* movT R,L(R) -> strT */
- // if offset L can be split into hi+lo, and both fit into instructions, do
+ // If offset L fits in a 12 bit unsigned immediate:
+ // add $L, R, Rtmp or sub $L, R, Rtmp
+ // str R, (Rtmp)
+ // Otherwise, if offset L can be split into hi+lo, and both fit into instructions:
// add $hi, R, Rtmp
// str R, lo(Rtmp)
- // otherwise, use constant pool
+ // Otherwise, use constant pool:
// mov $L, Rtmp (from constant pool)
// str R, (R+Rtmp)
s := movesize(o.as)
}
v := c.regoff(&p.To)
+ if v >= -256 && v <= 255 {
+ c.ctxt.Diag("%v: bad type for offset %d (should be 9 bit signed immediate store)", p, v)
+ }
+ if v >= 0 && v <= 4095 && v&((1<<int32(s))-1) == 0 {
+ c.ctxt.Diag("%v: bad type for offset %d (should be 12 bit unsigned immediate store)", p, v)
+ }
+
+ // Handle smaller unaligned and negative offsets via addition or subtraction.
+ if v >= -4095 && v <= 4095 {
+ o1 = c.oaddi12(p, v, REGTMP, int16(r))
+ o2 = c.olsr12u(p, c.opstr(p, p.As), 0, REGTMP, p.From.Reg)
+ break
+ }
+
hi, lo, err := splitImm24uScaled(v, s)
if err != nil {
goto storeusepool
o2 = c.olsxrr(p, int32(c.opstrr(p, p.As, false)), int(p.From.Reg), int(r), REGTMP)
case 31: /* movT L(R), R -> ldrT */
- // if offset L can be split into hi+lo, and both fit into instructions, do
+ // If offset L fits in a 12 bit unsigned immediate:
+ // add $L, R, Rtmp or sub $L, R, Rtmp
+ // ldr R, (Rtmp)
+ // Otherwise, if offset L can be split into hi+lo, and both fit into instructions:
// add $hi, R, Rtmp
// ldr lo(Rtmp), R
- // otherwise, use constant pool
+ // Otherwise, use constant pool:
// mov $L, Rtmp (from constant pool)
// ldr (R+Rtmp), R
s := movesize(o.as)
}
v := c.regoff(&p.From)
+ if v >= -256 && v <= 255 {
+ c.ctxt.Diag("%v: bad type for offset %d (should be 9 bit signed immediate load)", p, v)
+ }
+ if v >= 0 && v <= 4095 && v&((1<<int32(s))-1) == 0 {
+ c.ctxt.Diag("%v: bad type for offset %d (should be 12 bit unsigned immediate load)", p, v)
+ }
+
+ // Handle smaller unaligned and negative offsets via addition or subtraction.
+ if v >= -4095 && v <= 4095 {
+ o1 = c.oaddi12(p, v, REGTMP, int16(r))
+ o2 = c.olsr12u(p, c.opldr(p, p.As), 0, REGTMP, p.To.Reg)
+ break
+ }
+
hi, lo, err := splitImm24uScaled(v, s)
if err != nil {
goto loadusepool