"sort"
)
+// ctxt0 holds state while assembling a single function.
+// Each function gets a fresh ctxt0.
+// This allows for multiple functions to be safely concurrently assembled.
+type ctxt0 struct {
+ ctxt *obj.Link // shared link context, used for Diag, Arch, Logf, FixedFrameSize
+ newprog obj.ProgAlloc // allocator for fresh Prog nodes (branch fixups, delay-slot NOPs)
+ cursym *obj.LSym // symbol of the function currently being assembled
+ autosize int32 // frame size: text stack size plus fixed frame (set in span0/preprocess)
+ instoffset int64 // scratch offset computed by aclass as a side effect of operand classification
+ pc int64 // PC of the instruction currently being encoded (used for relocation offsets)
+}
+
// Instruction layout.
const (
if p == nil || p.Link == nil { // handle external functions and ELF section symbols
return
}
- ctxt.Cursym = cursym
- ctxt.Autosize = int32(p.To.Offset + ctxt.FixedFrameSize())
+
+ c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset + ctxt.FixedFrameSize())}
if oprange[AOR&obj.AMask] == nil {
- ctxt.Diag("mips ops not initialized, call mips.buildop first")
+ c.ctxt.Diag("mips ops not initialized, call mips.buildop first")
}
pc := int64(0)
var o *Optab
for p = p.Link; p != nil; p = p.Link {
p.Pc = pc
- o = oplook(ctxt, p)
+ o = c.oplook(p)
m = int(o.size)
if m == 0 {
if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
- ctxt.Diag("zero-width instruction\n%v", p)
+ c.ctxt.Diag("zero-width instruction\n%v", p)
}
continue
}
pc += int64(m)
}
- cursym.Size = pc
+ c.cursym.Size = pc
/*
* if any procedure is large enough to
for bflag != 0 {
bflag = 0
pc = 0
- for p = cursym.Text.Link; p != nil; p = p.Link {
+ for p = c.cursym.Text.Link; p != nil; p = p.Link {
p.Pc = pc
- o = oplook(ctxt, p)
+ o = c.oplook(p)
// very large conditional branches
if o.type_ == 6 && p.Pcond != nil {
otxt = p.Pcond.Pc - pc
if otxt < -(1<<17)+10 || otxt >= (1<<17)-10 {
- q = newprog()
+ q = c.newprog()
q.Link = p.Link
p.Link = q
q.As = AJMP
q.To.Type = obj.TYPE_BRANCH
q.Pcond = p.Pcond
p.Pcond = q
- q = newprog()
+ q = c.newprog()
q.Link = p.Link
p.Link = q
q.As = AJMP
q.To.Type = obj.TYPE_BRANCH
q.Pcond = q.Link.Link
- addnop(ctxt, p.Link, newprog)
- addnop(ctxt, p, newprog)
+ c.addnop(p.Link)
+ c.addnop(p)
bflag = 1
}
}
m = int(o.size)
if m == 0 {
if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
- ctxt.Diag("zero-width instruction\n%v", p)
+ c.ctxt.Diag("zero-width instruction\n%v", p)
}
continue
}
pc += int64(m)
}
- cursym.Size = pc
+ c.cursym.Size = pc
}
- if ctxt.Arch.Family == sys.MIPS64 {
+ if c.ctxt.Arch.Family == sys.MIPS64 {
pc += -pc & (mips64FuncAlign - 1)
}
- cursym.Size = pc
+ c.cursym.Size = pc
/*
* lay out the code, emitting code and data relocations.
*/
- cursym.Grow(cursym.Size)
+ c.cursym.Grow(c.cursym.Size)
- bp := cursym.P
+ bp := c.cursym.P
var i int32
var out [4]uint32
- for p := cursym.Text.Link; p != nil; p = p.Link {
- ctxt.Pc = p.Pc
- o = oplook(ctxt, p)
+ for p := c.cursym.Text.Link; p != nil; p = p.Link {
+ c.pc = p.Pc
+ o = c.oplook(p)
if int(o.size) > 4*len(out) {
log.Fatalf("out array in span0 is too small, need at least %d for %v", o.size/4, p)
}
- asmout(ctxt, p, o, out[:])
+ c.asmout(p, o, out[:])
for i = 0; i < int32(o.size/4); i++ {
- ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
+ c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
bp = bp[4:]
}
}
return uint64(uint32(v)) == v
}
-func aclass(ctxt *obj.Link, a *obj.Addr) int {
+func (c *ctxt0) aclass(a *obj.Addr) int {
switch a.Type {
case obj.TYPE_NONE:
return C_NONE
if a.Sym == nil {
break
}
- ctxt.Instoffset = a.Offset
+ c.instoffset = a.Offset
if a.Sym != nil { // use relocation
if a.Sym.Type == obj.STLSBSS {
return C_TLS
return C_LEXT
case obj.NAME_AUTO:
- ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
- if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ c.instoffset = int64(c.autosize) + a.Offset
+ if c.instoffset >= -BIG && c.instoffset < BIG {
return C_SAUTO
}
return C_LAUTO
case obj.NAME_PARAM:
- ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
- if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
+ if c.instoffset >= -BIG && c.instoffset < BIG {
return C_SAUTO
}
return C_LAUTO
case obj.NAME_NONE:
- ctxt.Instoffset = a.Offset
- if ctxt.Instoffset == 0 {
+ c.instoffset = a.Offset
+ if c.instoffset == 0 {
return C_ZOREG
}
- if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ if c.instoffset >= -BIG && c.instoffset < BIG {
return C_SOREG
}
return C_LOREG
obj.TYPE_ADDR:
switch a.Name {
case obj.NAME_NONE:
- ctxt.Instoffset = a.Offset
+ c.instoffset = a.Offset
if a.Reg != 0 {
- if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
+ if -BIG <= c.instoffset && c.instoffset <= BIG {
return C_SACON
}
- if isint32(ctxt.Instoffset) {
+ if isint32(c.instoffset) {
return C_LACON
}
return C_DACON
break
}
if s.Type == obj.SCONST {
- ctxt.Instoffset = a.Offset
+ c.instoffset = a.Offset
goto consize
}
- ctxt.Instoffset = a.Offset
+ c.instoffset = a.Offset
if s.Type == obj.STLSBSS {
return C_STCON // address of TLS variable
}
return C_LECON
case obj.NAME_AUTO:
- ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
- if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ c.instoffset = int64(c.autosize) + a.Offset
+ if c.instoffset >= -BIG && c.instoffset < BIG {
return C_SACON
}
return C_LACON
case obj.NAME_PARAM:
- ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
- if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
+ if c.instoffset >= -BIG && c.instoffset < BIG {
return C_SACON
}
return C_LACON
return C_GOK
consize:
- if ctxt.Instoffset >= 0 {
- if ctxt.Instoffset == 0 {
+ if c.instoffset >= 0 {
+ if c.instoffset == 0 {
return C_ZCON
}
- if ctxt.Instoffset <= 0x7fff {
+ if c.instoffset <= 0x7fff {
return C_SCON
}
- if ctxt.Instoffset <= 0xffff {
+ if c.instoffset <= 0xffff {
return C_ANDCON
}
- if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */
+ if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
return C_UCON
}
- if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) {
+ if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
return C_LCON
}
return C_LCON // C_DCON
}
- if ctxt.Instoffset >= -0x8000 {
+ if c.instoffset >= -0x8000 {
return C_ADDCON
}
- if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) {
+ if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
return C_UCON
}
- if isint32(ctxt.Instoffset) {
+ if isint32(c.instoffset) {
return C_LCON
}
return C_LCON // C_DCON
fmt.Printf("%v\n", p)
}
-func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
+func (c *ctxt0) oplook(p *obj.Prog) *Optab {
if oprange[AOR&obj.AMask] == nil {
- ctxt.Diag("mips ops not initialized, call mips.buildop first")
+ c.ctxt.Diag("mips ops not initialized, call mips.buildop first")
}
a1 := int(p.Optab)
}
a1 = int(p.From.Class)
if a1 == 0 {
- a1 = aclass(ctxt, &p.From) + 1
+ a1 = c.aclass(&p.From) + 1
p.From.Class = int8(a1)
}
a1--
a3 := int(p.To.Class)
if a3 == 0 {
- a3 = aclass(ctxt, &p.To) + 1
+ a3 = c.aclass(&p.To) + 1
p.To.Class = int8(a3)
}
c3 := &xcmp[a3]
for i := range ops {
op := &ops[i]
- if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && (op.family == 0 || ctxt.Arch.Family == op.family) {
+ if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && (op.family == 0 || c.ctxt.Arch.Family == op.family) {
p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
return op
}
}
- ctxt.Diag("illegal combination %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3))
+ c.ctxt.Diag("illegal combination %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3))
prasm(p)
if ops == nil {
ops = optab
return op | i&0x3FFFFFF
}
-func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
+func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 := uint32(0)
o2 := uint32(0)
o3 := uint32(0)
add := AADDU
- if ctxt.Arch.Family == sys.MIPS64 {
+ if c.ctxt.Arch.Family == sys.MIPS64 {
add = AADDVU
}
switch o.type_ {
default:
- ctxt.Diag("unknown type %d %v", o.type_)
+ c.ctxt.Diag("unknown type %d %v", o.type_)
prasm(p)
case 0: /* pseudo ops */
case 1: /* mov r1,r2 ==> OR r1,r0,r2 */
a := AOR
- if p.As == AMOVW && ctxt.Arch.Family == sys.MIPS64 {
+ if p.As == AMOVW && c.ctxt.Arch.Family == sys.MIPS64 {
a = AADDU // sign-extended to high 32 bits
}
- o1 = OP_RRR(oprrr(ctxt, a), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg))
+ o1 = OP_RRR(c.oprrr(a), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg))
case 2: /* add/sub r1,[r2],r3 */
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o1 = OP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+ o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
case 3: /* mov $soreg, r ==> or/add $i,o,r */
- v := regoff(ctxt, &p.From)
+ v := c.regoff(&p.From)
r := int(p.From.Reg)
if r == 0 {
a = AOR
}
- o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.To.Reg))
+ o1 = OP_IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.To.Reg))
case 4: /* add $scon,[r1],r2 */
- v := regoff(ctxt, &p.From)
+ v := c.regoff(&p.From)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+ o1 = OP_IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
case 5: /* syscall */
- o1 = oprrr(ctxt, p.As)
+ o1 = c.oprrr(p.As)
case 6: /* beq r1,[r2],sbra */
v := int32(0)
v = int32(p.Pcond.Pc-p.Pc-4) >> 2
}
if (v<<16)>>16 != v {
- ctxt.Diag("short branch too far\n%v", p)
+ c.ctxt.Diag("short branch too far\n%v", p)
}
- o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(p.From.Reg), uint32(p.Reg))
+ o1 = OP_IRR(c.opirr(p.As), uint32(v), uint32(p.From.Reg), uint32(p.Reg))
// for ABFPT and ABFPF only: always fill delay slot with 0
// see comments in func preprocess for details.
o2 = 0
if r == 0 {
r = int(o.param)
}
- v := regoff(ctxt, &p.To)
- o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.From.Reg))
+ v := c.regoff(&p.To)
+ o1 = OP_IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.From.Reg))
case 8: /* mov soreg, r ==> lw o(r) */
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
- v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, -p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+ v := c.regoff(&p.From)
+ o1 = OP_IRR(c.opirr(-p.As), uint32(v), uint32(r), uint32(p.To.Reg))
case 9: /* sll r1,[r2],r3 */
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o1 = OP_RRR(oprrr(ctxt, p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
+ o1 = OP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
case 10: /* add $con,[r1],r2 ==> mov $con, t; add t,[r1],r2 */
- v := regoff(ctxt, &p.From)
+ v := c.regoff(&p.From)
a := AOR
if v < 0 {
a = AADDU
}
- o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(0), uint32(REGTMP))
+ o1 = OP_IRR(c.opirr(a), uint32(v), uint32(0), uint32(REGTMP))
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o2 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+ o2 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 11: /* jmp lbra */
v := int32(0)
- if aclass(ctxt, &p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP {
+ if c.aclass(&p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP {
// use PC-relative branch for short branches
// BEQ R0, R0, sbra
if p.Pcond == nil {
v = int32(p.Pcond.Pc-p.Pc-4) >> 2
}
if (v<<16)>>16 == v {
- o1 = OP_IRR(opirr(ctxt, ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO))
+ o1 = OP_IRR(c.opirr(ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO))
break
}
}
} else {
v = int32(p.Pcond.Pc) >> 2
}
- o1 = OP_JMP(opirr(ctxt, p.As), uint32(v))
+ o1 = OP_JMP(c.opirr(p.As), uint32(v))
if p.To.Sym == nil {
- p.To.Sym = ctxt.Cursym.Text.From.Sym
+ p.To.Sym = c.cursym.Text.From.Sym
p.To.Offset = p.Pcond.Pc
}
- rel := obj.Addrel(ctxt.Cursym)
- rel.Off = int32(ctxt.Pc)
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
rel.Siz = 4
rel.Sym = p.To.Sym
rel.Add = p.To.Offset
if p.As == AMOVB {
v = 24
}
- o1 = OP_SRR(opirr(ctxt, ASLL), uint32(v), uint32(p.From.Reg), uint32(p.To.Reg))
- o2 = OP_SRR(opirr(ctxt, ASRA), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
+ o1 = OP_SRR(c.opirr(ASLL), uint32(v), uint32(p.From.Reg), uint32(p.To.Reg))
+ o2 = OP_SRR(c.opirr(ASRA), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
case 13: /* movbu r,r */
if p.As == AMOVBU {
- o1 = OP_IRR(opirr(ctxt, AAND), uint32(0xff), uint32(p.From.Reg), uint32(p.To.Reg))
+ o1 = OP_IRR(c.opirr(AAND), uint32(0xff), uint32(p.From.Reg), uint32(p.To.Reg))
} else {
- o1 = OP_IRR(opirr(ctxt, AAND), uint32(0xffff), uint32(p.From.Reg), uint32(p.To.Reg))
+ o1 = OP_IRR(c.opirr(AAND), uint32(0xffff), uint32(p.From.Reg), uint32(p.To.Reg))
}
case 14: /* movwu r,r */
- o1 = OP_SRR(opirr(ctxt, -ASLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
- o2 = OP_SRR(opirr(ctxt, -ASRLV), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
+ o1 = OP_SRR(c.opirr(-ASLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
+ o2 = OP_SRR(c.opirr(-ASRLV), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
case 15: /* teq $c r,r */
- v := regoff(ctxt, &p.From)
+ v := c.regoff(&p.From)
r := int(p.Reg)
if r == 0 {
r = REGZERO
}
/* only use 10 bits of trap code */
- o1 = OP_IRR(opirr(ctxt, p.As), (uint32(v)&0x3FF)<<6, uint32(p.Reg), uint32(p.To.Reg))
+ o1 = OP_IRR(c.opirr(p.As), (uint32(v)&0x3FF)<<6, uint32(p.Reg), uint32(p.To.Reg))
case 16: /* sll $c,[r1],r2 */
- v := regoff(ctxt, &p.From)
+ v := c.regoff(&p.From)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
/* OP_SRR will use only the low 5 bits of the shift value */
if v >= 32 && vshift(p.As) {
- o1 = OP_SRR(opirr(ctxt, -p.As), uint32(v-32), uint32(r), uint32(p.To.Reg))
+ o1 = OP_SRR(c.opirr(-p.As), uint32(v-32), uint32(r), uint32(p.To.Reg))
} else {
- o1 = OP_SRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+ o1 = OP_SRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
}
case 17:
- o1 = OP_RRR(oprrr(ctxt, p.As), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg))
+ o1 = OP_RRR(c.oprrr(p.As), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg))
case 18: /* jmp [r1],0(r2) */
r := int(p.Reg)
if r == 0 {
r = int(o.param)
}
- o1 = OP_RRR(oprrr(ctxt, p.As), uint32(0), uint32(p.To.Reg), uint32(r))
- rel := obj.Addrel(ctxt.Cursym)
- rel.Off = int32(ctxt.Pc)
+ o1 = OP_RRR(c.oprrr(p.As), uint32(0), uint32(p.To.Reg), uint32(r))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
rel.Siz = 0
rel.Type = obj.R_CALLIND
case 19: /* mov $lcon,r ==> lu+or */
- v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
- o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
+ v := c.regoff(&p.From)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
+ o2 = OP_IRR(c.opirr(AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
case 20: /* mov lo/hi,r */
a := OP(2, 0) /* mfhi */
a := SP(3, 4) | 2 /* mul */
o1 = OP_RRR(a, uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
} else {
- o1 = OP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO))
+ o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO))
}
case 23: /* add $lcon,r1,r2 ==> lu+or+add */
- v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
- o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
+ v := c.regoff(&p.From)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ o2 = OP_IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o3 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+ o3 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 24: /* mov $ucon,r ==> lu r */
- v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
+ v := c.regoff(&p.From)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
case 25: /* add/and $ucon,[r1],r2 ==> lu $con,t; add t,[r1],r2 */
- v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ v := c.regoff(&p.From)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o2 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+ o2 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 26: /* mov $lsext/auto/oreg,r ==> lu+or+add */
- v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
- o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
+ v := c.regoff(&p.From)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ o2 = OP_IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
- o3 = OP_RRR(oprrr(ctxt, add), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+ o3 = OP_RRR(c.oprrr(add), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 27: /* mov [sl]ext/auto/oreg,fr ==> lwc1 o(r) */
- v := regoff(ctxt, &p.From)
+ v := c.regoff(&p.From)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
switch o.size {
case 12:
- o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
- o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP))
- o3 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
+ o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
+ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
+ o3 = OP_IRR(c.opirr(a), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
case 4:
- o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.To.Reg))
+ o1 = OP_IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.To.Reg))
}
case 28: /* mov fr,[sl]ext/auto/oreg ==> swc1 o(r) */
- v := regoff(ctxt, &p.To)
+ v := c.regoff(&p.To)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
switch o.size {
case 12:
- o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
- o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP))
- o3 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
+ o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
+ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
+ o3 = OP_IRR(c.opirr(a), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
case 4:
- o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.From.Reg))
+ o1 = OP_IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.From.Reg))
}
case 30: /* movw r,fr */
if r == 0 {
r = int(p.To.Reg)
}
- o1 = OP_FRRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+ o1 = OP_FRRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
case 33: /* fabs fr1, fr3 */
- o1 = OP_FRRR(oprrr(ctxt, p.As), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
+ o1 = OP_FRRR(c.oprrr(p.As), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
case 34: /* mov $con,fr ==> or/add $i,t; mov t,fr */
- v := regoff(ctxt, &p.From)
+ v := c.regoff(&p.From)
a := AADDU
if o.a1 == C_ANDCON {
a = AOR
}
- o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(0), uint32(REGTMP))
+ o1 = OP_IRR(c.opirr(a), uint32(v), uint32(0), uint32(REGTMP))
o2 = OP_RRR(SP(2, 1)|(4<<21), uint32(REGTMP), uint32(0), uint32(p.To.Reg)) /* mtc1 */
case 35: /* mov r,lext/auto/oreg ==> sw o(REGTMP) */
- v := regoff(ctxt, &p.To)
+ v := c.regoff(&p.To)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
- o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
- o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP))
- o3 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
+ o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
+ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
+ o3 = OP_IRR(c.opirr(p.As), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
case 36: /* mov lext/auto/oreg,r ==> lw o(REGTMP) */
- v := regoff(ctxt, &p.From)
+ v := c.regoff(&p.From)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
- o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
- o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP))
- o3 = OP_IRR(opirr(ctxt, -p.As), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
+ o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
+ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP))
+ o3 = OP_IRR(c.opirr(-p.As), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
case 37: /* movw r,mr */
a := SP(2, 0) | (4 << 21) /* mtc0 */
o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
case 40: /* word */
- o1 = uint32(regoff(ctxt, &p.From))
+ o1 = uint32(c.regoff(&p.From))
case 41: /* movw f,fcr */
o1 = OP_RRR(SP(2, 1)|(2<<21), uint32(REGZERO), uint32(0), uint32(p.To.Reg)) /* mfcc1 */
/* relocation operations */
case 50: /* mov r,addr ==> lu + add REGSB, REGTMP + sw o(REGTMP) */
- o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
- rel := obj.Addrel(ctxt.Cursym)
- rel.Off = int32(ctxt.Pc)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
rel.Siz = 4
rel.Sym = p.To.Sym
rel.Add = p.To.Offset
rel.Type = obj.R_ADDRMIPSU
- o2 = OP_IRR(opirr(ctxt, p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
- rel2 := obj.Addrel(ctxt.Cursym)
- rel2.Off = int32(ctxt.Pc + 4)
+ o2 = OP_IRR(c.opirr(p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
+ rel2 := obj.Addrel(c.cursym)
+ rel2.Off = int32(c.pc + 4)
rel2.Siz = 4
rel2.Sym = p.To.Sym
rel2.Add = p.To.Offset
if o.size == 12 {
o3 = o2
- o2 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
+ o2 = OP_RRR(c.oprrr(AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
rel2.Off += 4
}
case 51: /* mov addr,r ==> lu + add REGSB, REGTMP + lw o(REGTMP) */
- o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
- rel := obj.Addrel(ctxt.Cursym)
- rel.Off = int32(ctxt.Pc)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
rel.Siz = 4
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
rel.Type = obj.R_ADDRMIPSU
- o2 = OP_IRR(opirr(ctxt, -p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
- rel2 := obj.Addrel(ctxt.Cursym)
- rel2.Off = int32(ctxt.Pc + 4)
+ o2 = OP_IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
+ rel2 := obj.Addrel(c.cursym)
+ rel2.Off = int32(c.pc + 4)
rel2.Siz = 4
rel2.Sym = p.From.Sym
rel2.Add = p.From.Offset
if o.size == 12 {
o3 = o2
- o2 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
+ o2 = OP_RRR(c.oprrr(AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
rel2.Off += 4
}
case 52: /* mov $lext, r ==> lu + add REGSB, r + add */
- o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(p.To.Reg))
- rel := obj.Addrel(ctxt.Cursym)
- rel.Off = int32(ctxt.Pc)
+ o1 = OP_IRR(c.opirr(ALUI), uint32(0), uint32(REGZERO), uint32(p.To.Reg))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
rel.Siz = 4
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
rel.Type = obj.R_ADDRMIPSU
- o2 = OP_IRR(opirr(ctxt, add), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
- rel2 := obj.Addrel(ctxt.Cursym)
- rel2.Off = int32(ctxt.Pc + 4)
+ o2 = OP_IRR(c.opirr(add), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
+ rel2 := obj.Addrel(c.cursym)
+ rel2.Off = int32(c.pc + 4)
rel2.Siz = 4
rel2.Sym = p.From.Sym
rel2.Add = p.From.Offset
if o.size == 12 {
o3 = o2
- o2 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGSB), uint32(p.To.Reg), uint32(p.To.Reg))
+ o2 = OP_RRR(c.oprrr(AADDVU), uint32(REGSB), uint32(p.To.Reg), uint32(p.To.Reg))
rel2.Off += 4
}
// clobbers R3 !
// load thread pointer with RDHWR, R3 is used for fast kernel emulation on Linux
o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
- o2 = OP_IRR(opirr(ctxt, p.As), uint32(0), uint32(REG_R3), uint32(p.From.Reg))
- rel := obj.Addrel(ctxt.Cursym)
- rel.Off = int32(ctxt.Pc + 4)
+ o2 = OP_IRR(c.opirr(p.As), uint32(0), uint32(REG_R3), uint32(p.From.Reg))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc + 4)
rel.Siz = 4
rel.Sym = p.To.Sym
rel.Add = p.To.Offset
case 54: /* mov tlsvar, r ==> rdhwr + lw o(r3) */
// clobbers R3 !
o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
- o2 = OP_IRR(opirr(ctxt, -p.As), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
- rel := obj.Addrel(ctxt.Cursym)
- rel.Off = int32(ctxt.Pc + 4)
+ o2 = OP_IRR(c.opirr(-p.As), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc + 4)
rel.Siz = 4
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
case 55: /* mov $tlsvar, r ==> rdhwr + add */
// clobbers R3 !
o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
- o2 = OP_IRR(opirr(ctxt, add), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
- rel := obj.Addrel(ctxt.Cursym)
- rel.Off = int32(ctxt.Pc + 4)
+ o2 = OP_IRR(c.opirr(add), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc + 4)
rel.Siz = 4
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
return
}
-func vregoff(ctxt *obj.Link, a *obj.Addr) int64 {
- ctxt.Instoffset = 0
- aclass(ctxt, a)
- return ctxt.Instoffset
+func (c *ctxt0) vregoff(a *obj.Addr) int64 { // returns the 64-bit offset encoded by operand a
+ c.instoffset = 0 // clear stale value from a previous classification
+ c.aclass(a) // aclass stores the operand's offset into c.instoffset as a side effect
+ return c.instoffset
 }
-func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
- return int32(vregoff(ctxt, a))
+func (c *ctxt0) regoff(a *obj.Addr) int32 { // 32-bit view of vregoff; high bits are truncated
+ return int32(c.vregoff(a))
 }
-func oprrr(ctxt *obj.Link, a obj.As) uint32 {
+func (c *ctxt0) oprrr(a obj.As) uint32 {
switch a {
case AADD:
return OP(4, 0)
}
if a < 0 {
- ctxt.Diag("bad rrr opcode -%v", -a)
+ c.ctxt.Diag("bad rrr opcode -%v", -a)
} else {
- ctxt.Diag("bad rrr opcode %v", a)
+ c.ctxt.Diag("bad rrr opcode %v", a)
}
return 0
}
-func opirr(ctxt *obj.Link, a obj.As) uint32 {
+func (c *ctxt0) opirr(a obj.As) uint32 {
switch a {
case AADD:
return SP(1, 0)
}
if a < 0 {
- ctxt.Diag("bad irr opcode -%v", -a)
+ c.ctxt.Diag("bad irr opcode -%v", -a)
} else {
- ctxt.Diag("bad irr opcode %v", a)
+ c.ctxt.Diag("bad irr opcode %v", a)
}
return 0
}
)
func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
+ c := ctxt0{ctxt: ctxt, newprog: newprog}
+
p.From.Class = 0
p.To.Class = 0
case AMOVD:
if p.From.Type == obj.TYPE_FCONST {
f64 := p.From.Val.(float64)
- if math.Float64bits(f64) == 0 && ctxt.Arch.Family == sys.MIPS64 {
+ if math.Float64bits(f64) == 0 && c.ctxt.Arch.Family == sys.MIPS64 {
p.As = AMOVV
p.From.Type = obj.TYPE_REG
p.From.Reg = REGZERO
func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
// TODO(minux): add morestack short-cuts with small fixed frame-size.
- ctxt.Cursym = cursym
+ c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym}
// a switch for enabling/disabling instruction scheduling
nosched := true
- if cursym.Text == nil || cursym.Text.Link == nil {
+ if c.cursym.Text == nil || c.cursym.Text.Link == nil {
return
}
- p := cursym.Text
+ p := c.cursym.Text
textstksiz := p.To.Offset
- cursym.Args = p.To.Val.(int32)
- cursym.Locals = int32(textstksiz)
+ c.cursym.Args = p.To.Val.(int32)
+ c.cursym.Locals = int32(textstksiz)
/*
* find leaf subroutines
var q *obj.Prog
var q1 *obj.Prog
- for p := cursym.Text; p != nil; p = p.Link {
+ for p := c.cursym.Text; p != nil; p = p.Link {
switch p.As {
/* too hard, just leave alone */
case obj.ATEXT:
AJAL,
obj.ADUFFZERO,
obj.ADUFFCOPY:
- cursym.Text.Mark &^= LEAF
+ c.cursym.Text.Mark &^= LEAF
fallthrough
case AJMP,
}
var mov, add obj.As
- if ctxt.Arch.Family == sys.MIPS64 {
+ if c.ctxt.Arch.Family == sys.MIPS64 {
add = AADDV
mov = AMOVV
} else {
autosize := int32(0)
var p1 *obj.Prog
var p2 *obj.Prog
- for p := cursym.Text; p != nil; p = p.Link {
+ for p := c.cursym.Text; p != nil; p = p.Link {
o := p.As
switch o {
case obj.ATEXT:
autosize = int32(textstksiz + ctxt.FixedFrameSize())
if (p.Mark&LEAF != 0) && autosize <= int32(ctxt.FixedFrameSize()) {
autosize = 0
- } else if autosize&4 != 0 && ctxt.Arch.Family == sys.MIPS64 {
+ } else if autosize&4 != 0 && c.ctxt.Arch.Family == sys.MIPS64 {
autosize += 4
}
p.To.Offset = int64(autosize) - ctxt.FixedFrameSize()
if p.From3.Offset&obj.NOSPLIT == 0 {
- p = stacksplit(ctxt, p, newprog, autosize) // emit split check
+ p = c.stacksplit(p, autosize) // emit split check
}
q = p
q.To.Type = obj.TYPE_REG
q.To.Reg = REGSP
q.Spadj = +autosize
- } else if cursym.Text.Mark&LEAF == 0 {
- if cursym.Text.From3.Offset&obj.NOSPLIT != 0 {
+ } else if c.cursym.Text.Mark&LEAF == 0 {
+ if c.cursym.Text.From3.Offset&obj.NOSPLIT != 0 {
if ctxt.Debugvlog {
- ctxt.Logf("save suppressed in: %s\n", cursym.Name)
+ ctxt.Logf("save suppressed in: %s\n", c.cursym.Name)
}
- cursym.Text.Mark |= LEAF
+ c.cursym.Text.Mark |= LEAF
}
}
- if cursym.Text.Mark&LEAF != 0 {
- cursym.Set(obj.AttrLeaf, true)
+ if c.cursym.Text.Mark&LEAF != 0 {
+ c.cursym.Set(obj.AttrLeaf, true)
break
}
- if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
+ if c.cursym.Text.From3.Offset&obj.WRAPPER != 0 {
// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
//
// MOV g_panic(g), R1
q.As = mov
q.From.Type = obj.TYPE_MEM
q.From.Reg = REGG
- q.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
+ q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
q.To.Type = obj.TYPE_REG
q.To.Reg = REG_R1
p.To.Name = obj.NAME_NONE // clear fields as we may modify p to other instruction
p.To.Sym = nil
- if cursym.Text.Mark&LEAF != 0 {
+ if c.cursym.Text.Mark&LEAF != 0 {
if autosize == 0 {
p.As = AJMP
p.From = obj.Addr{}
p.To.Reg = REGSP
p.Spadj = -autosize
- q = newprog()
+ q = c.newprog()
q.As = AJMP
q.Pos = p.Pos
q.To.Type = obj.TYPE_MEM
}
if autosize != 0 {
- q = newprog()
+ q = c.newprog()
q.As = add
q.Pos = p.Pos
q.From.Type = obj.TYPE_CONST
p.Link = q
}
- q1 = newprog()
+ q1 = c.newprog()
q1.As = AJMP
q1.Pos = p.Pos
if retSym != nil { // retjmp
}
}
- if ctxt.Arch.Family == sys.MIPS {
+ if c.ctxt.Arch.Family == sys.MIPS {
// rewrite MOVD into two MOVF in 32-bit mode to avoid unaligned memory access
- for p = cursym.Text; p != nil; p = p1 {
+ for p = c.cursym.Text; p != nil; p = p1 {
p1 = p.Link
if p.As != AMOVD {
}
p.As = AMOVF
- q = newprog()
+ q = c.newprog()
*q = *p
q.Link = p.Link
p.Link = q
p1 = q.Link
var regOff int16
- if ctxt.Arch.ByteOrder == binary.BigEndian {
+ if c.ctxt.Arch.ByteOrder == binary.BigEndian {
regOff = 1 // load odd register first
}
if p.From.Type == obj.TYPE_MEM {
if nosched {
// if we don't do instruction scheduling, simply add
// NOP after each branch instruction.
- for p = cursym.Text; p != nil; p = p.Link {
+ for p = c.cursym.Text; p != nil; p = p.Link {
if p.Mark&BRANCH != 0 {
- addnop(ctxt, p, newprog)
+ c.addnop(p)
}
}
return
}
// instruction scheduling
- q = nil // p - 1
- q1 = cursym.Text // top of block
- o := 0 // count of instructions
- for p = cursym.Text; p != nil; p = p1 {
+ q = nil // p - 1
+ q1 = c.cursym.Text // top of block
+ o := 0 // count of instructions
+ for p = c.cursym.Text; p != nil; p = p1 {
p1 = p.Link
o++
if p.Mark&NOSCHED != 0 {
if q1 != p {
- sched(ctxt, newprog, q1, q)
+ c.sched(q1, q)
}
for ; p != nil; p = p.Link {
if p.Mark&NOSCHED == 0 {
}
if p.Mark&(LABEL|SYNC) != 0 {
if q1 != p {
- sched(ctxt, newprog, q1, q)
+ c.sched(q1, q)
}
q1 = p
o = 1
}
if p.Mark&(BRANCH|SYNC) != 0 {
- sched(ctxt, newprog, q1, p)
+ c.sched(q1, p)
q1 = p1
o = 0
}
if o >= NSCHED {
- sched(ctxt, newprog, q1, p)
+ c.sched(q1, p)
q1 = p1
o = 0
}
}
}
-func stacksplit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc, framesize int32) *obj.Prog {
+func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
// Leaf function with no frame is effectively NOSPLIT.
if framesize == 0 {
return p
var mov, add, sub obj.As
- if ctxt.Arch.Family == sys.MIPS64 {
+ if c.ctxt.Arch.Family == sys.MIPS64 {
add = AADDV
mov = AMOVV
sub = ASUBVU
}
// MOV g_stackguard(g), R1
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = mov
p.From.Type = obj.TYPE_MEM
p.From.Reg = REGG
- p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
- if ctxt.Cursym.CFunc() {
- p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
+ p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0
+ if c.cursym.CFunc() {
+ p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
if framesize <= obj.StackSmall {
// small stack: SP < stackguard
// AGTU SP, stackguard, R1
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = ASGTU
p.From.Type = obj.TYPE_REG
// large stack: SP-framesize < stackguard-StackSmall
// ADD $-(framesize-StackSmall), SP, R2
// SGTU R2, stackguard, R1
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = add
p.From.Type = obj.TYPE_CONST
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = ASGTU
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R2
// SUB R1, R2
// MOV $(framesize+(StackGuard-StackSmall)), R1
// SGTU R2, R1, R1
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = mov
p.From.Type = obj.TYPE_CONST
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
q = p
p.As = ABEQ
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_BRANCH
p.Mark |= BRANCH
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = add
p.From.Type = obj.TYPE_CONST
p.From.Offset = obj.StackGuard
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = sub
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = mov
p.From.Type = obj.TYPE_CONST
p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = ASGTU
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R2
}
// q1: BNE R1, done
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
q1 := p
p.As = ABNE
p.Mark |= BRANCH
// MOV LINK, R3
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = mov
p.From.Type = obj.TYPE_REG
}
// JAL runtime.morestack(SB)
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = AJAL
p.To.Type = obj.TYPE_BRANCH
- if ctxt.Cursym.CFunc() {
- p.To.Sym = ctxt.Lookup("runtime.morestackc", 0)
- } else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
- p.To.Sym = ctxt.Lookup("runtime.morestack_noctxt", 0)
+ if c.cursym.CFunc() {
+ p.To.Sym = c.ctxt.Lookup("runtime.morestackc", 0)
+ } else if c.cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
+ p.To.Sym = c.ctxt.Lookup("runtime.morestack_noctxt", 0)
} else {
- p.To.Sym = ctxt.Lookup("runtime.morestack", 0)
+ p.To.Sym = c.ctxt.Lookup("runtime.morestack", 0)
}
p.Mark |= BRANCH
// JMP start
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = AJMP
p.To.Type = obj.TYPE_BRANCH
- p.Pcond = ctxt.Cursym.Text.Link
+ p.Pcond = c.cursym.Text.Link
p.Mark |= BRANCH
// placeholder for q1's jump target
- p = obj.Appendp(p, newprog)
+ p = obj.Appendp(p, c.newprog)
p.As = obj.ANOP // zero-width place holder
q1.Pcond = p
return p
}
-func addnop(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
- q := newprog()
+func (c *ctxt0) addnop(p *obj.Prog) {
+ q := c.newprog()
// we want to use the canonical NOP (SLL $0,R0,R0) here,
// however, as the assembler will always replace $0
// as R0, we have to resort to manually encode the SLL
comp bool
}
-func sched(ctxt *obj.Link, newprog obj.ProgAlloc, p0, pe *obj.Prog) {
+func (c *ctxt0) sched(p0, pe *obj.Prog) {
var sch [NSCHED]Sch
/*
s := sch[:]
for p := p0; ; p = p.Link {
s[0].p = *p
- markregused(ctxt, &s[0])
+ c.markregused(&s[0])
if p == pe {
break
}
}
}
for u := t[1:]; -cap(u) <= -cap(s); u = u[1:] {
- if depend(ctxt, &u[0], &t[0]) {
+ if c.depend(&u[0], &t[0]) {
goto no2
}
}
}
for s[0].nop != 0 {
s[0].nop--
- addnop(ctxt, p, newprog)
+ c.addnop(p)
}
}
}
-func markregused(ctxt *obj.Link, s *Sch) {
+func (c *ctxt0) markregused(s *Sch) {
p := &s.p
- s.comp = compound(ctxt, p)
+ s.comp = c.compound(p)
s.nop = 0
if s.comp {
s.set.ireg |= 1 << (REGTMP - REG_R0)
*/
switch p.As {
case obj.ATEXT:
- ctxt.Autosize = int32(p.To.Offset + 8)
+ c.autosize = int32(p.To.Offset + 8)
ad = 1
case AJAL:
*/
cls := int(p.To.Class)
if cls == 0 {
- cls = aclass(ctxt, &p.To) + 1
+ cls = c.aclass(&p.To) + 1
p.To.Class = int8(cls)
}
cls--
break
}
s.size = uint8(sz)
- s.soffset = regoff(ctxt, &p.To)
+ s.soffset = c.regoff(&p.To)
m := uint32(ANYMEM)
if cls == REGSB {
break
}
s.size = uint8(sz)
- s.soffset = regoff(ctxt, &p.To)
+ s.soffset = c.regoff(&p.To)
if ar != 0 {
s.used.cc |= E_MEMSP
break
}
s.size = uint8(sz)
- s.soffset = regoff(ctxt, &p.To)
+ s.soffset = c.regoff(&p.To)
if ar != 0 {
s.used.cc |= E_MEMSB
*/
cls = int(p.From.Class)
if cls == 0 {
- cls = aclass(ctxt, &p.From) + 1
+ cls = c.aclass(&p.From) + 1
p.From.Class = int8(cls)
}
cls--
p.Mark |= LOAD
}
s.size = uint8(sz)
- s.soffset = regoff(ctxt, &p.From)
+ s.soffset = c.regoff(&p.From)
m := uint32(ANYMEM)
if cls == REGSB {
break
}
s.size = uint8(sz)
- s.soffset = regoff(ctxt, &p.From)
+ s.soffset = c.regoff(&p.From)
s.used.cc |= E_MEMSP
break
}
s.size = uint8(sz)
- s.soffset = regoff(ctxt, &p.From)
+ s.soffset = c.regoff(&p.From)
s.used.cc |= E_MEMSB
}
* test to see if two instructions can be
* interchanged without changing semantics
*/
-func depend(ctxt *obj.Link, sa, sb *Sch) bool {
+func (c *ctxt0) depend(sa, sb *Sch) bool {
if sa.set.ireg&(sb.set.ireg|sb.used.ireg) != 0 {
return true
}
*/
if sa.used.cc&sb.used.cc&E_MEM != 0 {
if sa.p.Reg == sb.p.Reg {
- if regoff(ctxt, &sa.p.From) == regoff(ctxt, &sb.p.From) {
+ if c.regoff(&sa.p.From) == c.regoff(&sb.p.From) {
return true
}
}
return false
}
-func compound(ctxt *obj.Link, p *obj.Prog) bool {
- o := oplook(ctxt, p)
+func (c *ctxt0) compound(p *obj.Prog) bool {
+ o := c.oplook(p)
if o.size != 4 {
return true
}