v := int32(c.regoff(&p.To))
sz := int32(1 << uint(movesize(p.As)))
- r := int(p.To.Reg)
+ r := p.To.Reg
if r == obj.REG_NONE {
- r = int(o.param)
+ r = o.param
}
if v < 0 || v%sz != 0 { /* unscaled 9-bit signed */
- o1 = c.olsr9s(p, int32(c.opstr(p, p.As)), v, r, int(p.From.Reg))
+ o1 = c.olsr9s(p, int32(c.opstr(p, p.As)), v, int(r), int(p.From.Reg))
} else {
v = int32(c.offsetshift(p, int64(v), int(o.a4)))
- o1 = c.olsr12u(p, int32(c.opstr(p, p.As)), v, r, int(p.From.Reg))
+ o1 = c.olsr12u(p, c.opstr(p, p.As), v, r, p.From.Reg)
}
case 21: /* movT O(R),R -> ldrT */
v := int32(c.regoff(&p.From))
sz := int32(1 << uint(movesize(p.As)))
- r := int(p.From.Reg)
+ r := p.From.Reg
if r == obj.REG_NONE {
- r = int(o.param)
+ r = o.param
}
if v < 0 || v%sz != 0 { /* unscaled 9-bit signed */
- o1 = c.olsr9s(p, int32(c.opldr(p, p.As)), v, r, int(p.To.Reg))
+ o1 = c.olsr9s(p, int32(c.opldr(p, p.As)), v, int(r), int(p.To.Reg))
} else {
v = int32(c.offsetshift(p, int64(v), int(o.a1)))
//print("offset=%lld v=%ld a1=%d\n", instoffset, v, o->a1);
- o1 = c.olsr12u(p, int32(c.opldr(p, p.As)), v, r, int(p.To.Reg))
+ o1 = c.olsr12u(p, c.opldr(p, p.As), v, r, p.To.Reg)
}
case 22: /* movT (R)O!,R; movT O(R)!, R -> ldrT */
}
o1 = c.oaddi(p, int32(c.opirr(p, AADD)), hi, r, REGTMP)
- o2 = c.olsr12u(p, int32(c.opstr(p, p.As)), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.From.Reg))
+ o2 = c.olsr12u(p, c.opstr(p, p.As), ((v-hi)>>uint(s))&0xFFF, REGTMP, p.From.Reg)
break
storeusepool:
}
o1 = c.oaddi(p, int32(c.opirr(p, AADD)), hi, r, REGTMP)
- o2 = c.olsr12u(p, int32(c.opldr(p, p.As)), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.To.Reg))
+ o2 = c.olsr12u(p, c.opldr(p, p.As), ((v-hi)>>uint(s))&0xFFF, REGTMP, p.To.Reg)
break
loadusepool:
// For unaligned access, fall back to adrp + add + movT R, (REGTMP).
if o.size(c.ctxt, p) != 8 {
o2 = c.opirr(p, AADD) | REGTMP&31<<5 | REGTMP&31
- o3 = c.olsr12u(p, int32(c.opstr(p, p.As)), 0, REGTMP, int(p.From.Reg))
+ o3 = c.olsr12u(p, c.opstr(p, p.As), 0, REGTMP, p.From.Reg)
rel.Type = objabi.R_ADDRARM64
break
}
- o2 = c.olsr12u(p, int32(c.opstr(p, p.As)), 0, REGTMP, int(p.From.Reg))
+ o2 = c.olsr12u(p, c.opstr(p, p.As), 0, REGTMP, p.From.Reg)
rel.Type = c.addrRelocType(p)
case 65: /* movT addr,R -> adrp + movT (REGTMP), R */
// For unaligned access, fall back to adrp + add + movT (REGTMP), R.
if o.size(c.ctxt, p) != 8 {
o2 = c.opirr(p, AADD) | REGTMP&31<<5 | REGTMP&31
- o3 = c.olsr12u(p, int32(c.opldr(p, p.As)), 0, REGTMP, int(p.To.Reg))
+ o3 = c.olsr12u(p, c.opldr(p, p.As), 0, REGTMP, p.To.Reg)
rel.Type = objabi.R_ADDRARM64
break
}
- o2 = c.olsr12u(p, int32(c.opldr(p, p.As)), 0, REGTMP, int(p.To.Reg))
+ o2 = c.olsr12u(p, c.opldr(p, p.As), 0, REGTMP, p.To.Reg)
rel.Type = c.addrRelocType(p)
case 66: /* ldp O(R)!, (r1, r2); ldp (R)O!, (r1, r2) */
case 70: /* IE model movd $tlsvar, reg -> adrp REGTMP, 0; ldr reg, [REGTMP, #0] + relocs */
o1 = ADR(1, 0, REGTMP)
- o2 = c.olsr12u(p, int32(c.opldr(p, AMOVD)), 0, REGTMP, int(p.To.Reg))
+ o2 = c.olsr12u(p, c.opldr(p, AMOVD), 0, REGTMP, p.To.Reg)
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc)
rel.Siz = 8
case 71: /* movd sym@GOT, reg -> adrp REGTMP, #0; ldr reg, [REGTMP, #0] + relocs */
o1 = ADR(1, 0, REGTMP)
- o2 = c.olsr12u(p, int32(c.opldr(p, AMOVD)), 0, REGTMP, int(p.To.Reg))
+ o2 = c.olsr12u(p, c.opldr(p, AMOVD), 0, REGTMP, p.To.Reg)
rel := obj.Addrel(c.cursym)
rel.Off = int32(c.pc)
rel.Siz = 8
* load/store register (scaled 12-bit unsigned immediate) C3.3.13
* these produce 64-bit values (when there's an option)
*/
-func (c *ctxt7) olsr12u(p *obj.Prog, o int32, v int32, b int, r int) uint32 {
+// olsr12u assembles a load/store-register instruction with a scaled 12-bit
+// unsigned immediate offset (Arm ARM C3.3.13). It merges the offset v, the
+// base register b, and the transfer register r into the opcode template o
+// and returns the finished instruction word. v is the already-scaled
+// immediate (callers divide the byte offset by the access size via
+// offsetshift); values outside [0, 1<<12) are reported with ctxt.Diag.
+func (c *ctxt7) olsr12u(p *obj.Prog, o uint32, v int32, b, r int16) uint32 {
if v < 0 || v >= (1<<12) {
c.ctxt.Diag("offset out of range: %d\n%v", v, p)
}
-o |= (v & 0xFFF) << 10
-o |= int32(b&31) << 5
-o |= int32(r & 31)
+// Field layout: imm12 in bits 10-21, Rn (base) in bits 5-9, Rt in bits 0-4.
+// The range check above guarantees v&0xFFF loses no information.
+o |= uint32(v&0xFFF) << 10
+o |= uint32(b&31) << 5
+o |= uint32(r & 31)
+// Bit 24 selects the unsigned-immediate addressing form.
o |= 1 << 24
-return uint32(o)
+return o
}
/*