gc.Dump("cgen-res", res)
}
- var n1 gc.Node
- var nr *gc.Node
- var nl *gc.Node
- var a int
- var f1 gc.Node
- var f0 gc.Node
- var n2 gc.Node
if n == nil || n.Type == nil {
- goto ret
+ return
}
if res == nil || res.Type == nil {
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
- goto ret
+ return
}
}
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
sgen(n, res, n.Type.Width)
- goto ret
+ return
}
// update addressability for string, slice
regfree(&n1)
}
- goto ret
+ return
}
// if both are not addressable, use a temporary.
}
sudoclean()
- goto ret
+ return
}
}
// otherwise, the result is addressable but n is not.
// let's do some computation.
- nl = n.Left
+ nl := n.Left
- nr = n.Right
+ nr := n.Right
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
n2 := *n
n2.Left = &n1
cgen(&n2, res)
- goto ret
+ return
}
}
}
}
+ var a int
+ var f0 gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
if nl != nil && gc.Isfloat[n.Type.Etype] != 0 && gc.Isfloat[nl.Type.Etype] != 0 {
- goto flt
+ // floating-point.
+ regalloc(&f0, nl.Type, res)
+
+ if nr != nil {
+ goto flt2
+ }
+
+ if n.Op == gc.OMINUS {
+ nr = gc.Nodintconst(-1)
+ gc.Convlit(&nr, n.Type)
+ n.Op = gc.OMUL
+ goto flt2
+ }
+
+ // unary
+ cgen(nl, &f0)
+
+ if n.Op != gc.OCONV && n.Op != gc.OPLUS {
+ gins(optoas(int(n.Op), n.Type), &f0, &f0)
+ }
+ gmove(&f0, res)
+ regfree(&f0)
+ return
}
switch n.Op {
default:
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
- goto ret
+ return
case gc.OPLUS:
cgen(nl, res)
- goto ret
+ return
// unary
case gc.OCOM:
gc.OMUL:
a = optoas(int(n.Op), nl.Type)
- goto sbop
+ // symmetric binary
+ if nl.Ullman < nr.Ullman {
+ r := nl
+ nl = nr
+ nr = r
+ }
+ goto abop
// asymmetric binary
case gc.OSUB:
goto abop
}
- goto ret
-
-sbop: // symmetric binary
- if nl.Ullman < nr.Ullman {
- r := nl
- nl = nr
- nr = r
- }
+ return
// TODO(kaib): use fewer registers here.
abop: // asymmetric binary
if n2.Op != gc.OLITERAL {
regfree(&n2)
}
- goto ret
-
-flt: // floating-point.
- regalloc(&f0, nl.Type, res)
-
- if nr != nil {
- goto flt2
- }
-
- if n.Op == gc.OMINUS {
- nr = gc.Nodintconst(-1)
- gc.Convlit(&nr, n.Type)
- n.Op = gc.OMUL
- goto flt2
- }
-
- // unary
- cgen(nl, &f0)
-
- if n.Op != gc.OCONV && n.Op != gc.OPLUS {
- gins(optoas(int(n.Op), n.Type), &f0, &f0)
- }
- gmove(&f0, res)
- regfree(&f0)
- goto ret
+ return
flt2: // binary
+ var f1 gc.Node
if nl.Ullman >= nr.Ullman {
cgen(nl, &f0)
regalloc(&f1, n.Type, nil)
gmove(&f1, res)
regfree(&f0)
regfree(&f1)
- goto ret
-
-ret:
+ return
}
/*
n = n.Left
}
- var nl *gc.Node
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
gins(arm.AMOVW, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
- goto ret
+ return
}
if n.Addable != 0 {
gins(arm.AMOVW, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
- goto ret
+ return
}
- nl = n.Left
+ nl := n.Left
switch n.Op {
default:
regfree(&n3)
}
}
-
-ret:
}
/*
gc.Genlist(n.Ninit)
}
- var et int
- var nl *gc.Node
- var nr *gc.Node
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
- goto ret
+ return
}
}
- et = int(n.Type.Etype)
+ et := int(n.Type.Etype)
if et != gc.TBOOL {
gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
gc.Patch(gins(obj.AEND, nil, nil), to)
- goto ret
+ return
}
- nr = nil
+ nr := (*gc.Node)(nil)
+ var nl *gc.Node
switch n.Op {
default:
a := gc.ONE
a = gc.OEQ
}
gencmp0(n, n.Type, a, likely, to)
- goto ret
+ return
// need to ask if it is bool?
case gc.OLITERAL:
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
}
- goto ret
+ return
case gc.OANDAND,
gc.OOROR:
bgen(n.Right, true_, likely, to)
}
- goto ret
+ return
case gc.OEQ,
gc.ONE,
gc.OGE:
nr = n.Right
if nr == nil || nr.Type == nil {
- goto ret
+ return
}
fallthrough
nl = n.Left
if nl == nil || nl.Type == nil {
- goto ret
+ return
}
}
switch n.Op {
case gc.ONOT:
bgen(nl, !true_, likely, to)
- goto ret
+ return
case gc.OEQ,
gc.ONE,
n.Ninit = ll
gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
gc.Patch(p2, gc.Pc)
- goto ret
+ return
}
a = gc.Brcom(a)
regfree(&n2)
}
- goto ret
-
-ret:
+ return
}
/*
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
- goto ret
+ return
}
// call pointer
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
- goto ret
+ return
}
// call direct
n.Left.Method = 1
ginscall(n.Left, proc)
-
-ret:
}
/*
// cannot have two memory operands;
// except 64-bit, which always copies via registers anyway.
- var flo gc.Node
var a int
var r1 gc.Node
- var fhi gc.Node
if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
switch uint32(ft)<<16 | uint32(tt) {
default:
- goto fatal
+ // should not happen
+ gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
+ return
/*
* integer copy and truncate
// truncate 64 bit integer
trunc64:
+ var fhi gc.Node
+ var flo gc.Node
split64(f, &flo, &fhi)
regalloc(&r1, t.Type, nil)
regfree(&r1)
splitclean()
return
-
- // should not happen
-fatal:
- gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
}
func samaddr(f *gc.Node, t *gc.Node) bool {
*a = obj.Addr{}
- var oary [10]int64
- var nn *gc.Node
- var reg *gc.Node
- var n1 gc.Node
- var reg1 *gc.Node
- var o int
switch n.Op {
case gc.OLITERAL:
if !gc.Isconst(n, gc.CTINT) {
if v >= 32000 || v <= -32000 {
break
}
- goto lit
+ switch as {
+ default:
+ return false
+
+ case arm.AADD,
+ arm.ASUB,
+ arm.AAND,
+ arm.AORR,
+ arm.AEOR,
+ arm.AMOVB,
+ arm.AMOVBS,
+ arm.AMOVBU,
+ arm.AMOVH,
+ arm.AMOVHS,
+ arm.AMOVHU,
+ arm.AMOVW:
+ break
+ }
+
+ cleani += 2
+ reg := &clean[cleani-1]
+ reg1 := &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ gc.Naddr(n, a, 1)
+ return true
case gc.ODOT,
gc.ODOTPTR:
cleani += 2
- reg = &clean[cleani-1]
+ reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
- goto odot
-
- case gc.OINDEX:
- return false
- }
-
- return false
+ var nn *gc.Node
+ var oary [10]int64
+ o := gc.Dotoffset(n, oary[:], &nn)
+ if nn == nil {
+ sudoclean()
+ return false
+ }
-lit:
- switch as {
- default:
- return false
+ if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
+ // directly addressable set of DOTs
+ n1 := *nn
- case arm.AADD,
- arm.ASUB,
- arm.AAND,
- arm.AORR,
- arm.AEOR,
- arm.AMOVB,
- arm.AMOVBS,
- arm.AMOVBU,
- arm.AMOVH,
- arm.AMOVHS,
- arm.AMOVHU,
- arm.AMOVW:
- break
- }
+ n1.Type = n.Type
+ n1.Xoffset += oary[0]
+ gc.Naddr(&n1, a, 1)
+ return true
+ }
- cleani += 2
- reg = &clean[cleani-1]
- reg1 = &clean[cleani-2]
- reg.Op = gc.OEMPTY
- reg1.Op = gc.OEMPTY
- gc.Naddr(n, a, 1)
- goto yes
-
-odot:
- o = gc.Dotoffset(n, oary[:], &nn)
- if nn == nil {
- goto no
- }
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ n1 := *reg
+ n1.Op = gc.OINDREG
+ if oary[0] >= 0 {
+ agen(nn, reg)
+ n1.Xoffset = oary[0]
+ } else {
+ cgen(nn, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[0] + 1)
+ }
- if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
- // directly addressable set of DOTs
- n1 := *nn
+ for i := 1; i < o; i++ {
+ if oary[i] >= 0 {
+ gc.Fatal("can't happen")
+ }
+ gins(arm.AMOVW, &n1, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[i] + 1)
+ }
+ a.Type = obj.TYPE_NONE
+ a.Name = obj.NAME_NONE
n1.Type = n.Type
- n1.Xoffset += oary[0]
gc.Naddr(&n1, a, 1)
- goto yes
- }
-
- regalloc(reg, gc.Types[gc.Tptr], nil)
- n1 = *reg
- n1.Op = gc.OINDREG
- if oary[0] >= 0 {
- agen(nn, reg)
- n1.Xoffset = oary[0]
- } else {
- cgen(nn, reg)
- gc.Cgen_checknil(reg)
- n1.Xoffset = -(oary[0] + 1)
- }
+ return true
- for i := 1; i < o; i++ {
- if oary[i] >= 0 {
- gc.Fatal("can't happen")
- }
- gins(arm.AMOVW, &n1, reg)
- gc.Cgen_checknil(reg)
- n1.Xoffset = -(oary[i] + 1)
+ case gc.OINDEX:
+ return false
}
- a.Type = obj.TYPE_NONE
- a.Name = obj.NAME_NONE
- n1.Type = n.Type
- gc.Naddr(&n1, a, 1)
- goto yes
-
-yes:
- return true
-
-no:
- sudoclean()
return false
}
if !regtyp(v2) {
return false
}
- var r *gc.Flow
var info gc.ProgInfo
- for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
break
}
if p.To.Type == v1.Type {
if p.To.Reg == v1.Reg {
if p.Scond == arm.C_SCOND_NONE {
- goto gotit
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub1(p, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t := int(int(v1.Reg))
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return true
}
}
}
}
return false
-
-gotit:
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
- if p.From.Type == v2.Type {
- fmt.Printf(" excise")
- }
- fmt.Printf("\n")
- }
-
- for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
- p = r.Prog
- copysub(&p.From, v1, v2, 1)
- copysub1(p, v1, v2, 1)
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v\n", r.Prog)
- }
- }
-
- t := int(int(v1.Reg))
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v last\n", r.Prog)
- }
- return true
}
/*
import "cmd/internal/ld"
func needlib(name string) int {
- var p string
- var s *ld.LSym
-
if name[0] == '\x00' {
return 0
}
/* reuse hash code in symbol table */
- p = fmt.Sprintf(".dynlib.%s", name)
+ p := fmt.Sprintf(".dynlib.%s", name)
- s = ld.Linklookup(ld.Ctxt, p, 0)
+ s := ld.Linklookup(ld.Ctxt, p, 0)
if s.Type == 0 {
s.Type = 100 // avoid SDATA, etc.
}
func adddynrel(s *ld.LSym, r *ld.Reloc) {
- var targ *ld.LSym
- var rel *ld.LSym
-
- targ = r.Sym
+ targ := r.Sym
ld.Ctxt.Cursym = s
switch r.Type {
}
if ld.Iself {
adddynsym(ld.Ctxt, targ)
- rel = ld.Linklookup(ld.Ctxt, ".rel", 0)
+ rel := ld.Linklookup(ld.Ctxt, ".rel", 0)
ld.Addaddrplus(ld.Ctxt, rel, s, int64(r.Off))
ld.Adduint32(ld.Ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_ARM_GLOB_DAT)) // we need a nil + A dynmic reloc
r.Type = ld.R_CONST // write r->add during relocsym
}
func elfreloc1(r *ld.Reloc, sectoff int64) int {
- var elfsym int32
-
ld.Thearch.Lput(uint32(sectoff))
- elfsym = r.Xsym.Elfsym
+ elfsym := r.Xsym.Elfsym
switch r.Type {
default:
return -1
}
func elfsetupplt() {
- var plt *ld.LSym
- var got *ld.LSym
-
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
- got = ld.Linklookup(ld.Ctxt, ".got.plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got.plt", 0)
if plt.Size == 0 {
// str lr, [sp, #-4]!
ld.Adduint32(ld.Ctxt, plt, 0xe52de004)
func machoreloc1(r *ld.Reloc, sectoff int64) int {
var v uint32
- var rs *ld.LSym
- rs = r.Xsym
+ rs := r.Xsym
if rs.Type == ld.SHOSTOBJ || r.Type == ld.R_CALLARM {
if rs.Dynid < 0 {
}
func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
- var rs *ld.LSym
-
if ld.Linkmode == ld.LinkExternal {
switch r.Type {
case ld.R_CALLARM:
r.Done = 0
// set up addend for eventual relocation via outer symbol.
- rs = r.Sym
+ rs := r.Sym
r.Xadd = r.Add
if r.Xadd&0x800000 != 0 {
}
func addpltreloc(ctxt *ld.Link, plt *ld.LSym, got *ld.LSym, sym *ld.LSym, typ int) *ld.Reloc {
- var r *ld.Reloc
-
- r = ld.Addrel(plt)
+ r := ld.Addrel(plt)
r.Sym = got
r.Off = int32(plt.Size)
r.Siz = 4
}
func addpltsym(ctxt *ld.Link, s *ld.LSym) {
- var plt *ld.LSym
- var got *ld.LSym
- var rel *ld.LSym
-
if s.Plt >= 0 {
return
}
adddynsym(ctxt, s)
if ld.Iself {
- plt = ld.Linklookup(ctxt, ".plt", 0)
- got = ld.Linklookup(ctxt, ".got.plt", 0)
- rel = ld.Linklookup(ctxt, ".rel.plt", 0)
+ plt := ld.Linklookup(ctxt, ".plt", 0)
+ got := ld.Linklookup(ctxt, ".got.plt", 0)
+ rel := ld.Linklookup(ctxt, ".rel.plt", 0)
if plt.Size == 0 {
elfsetupplt()
}
}
func addgotsyminternal(ctxt *ld.Link, s *ld.LSym) {
- var got *ld.LSym
-
if s.Got >= 0 {
return
}
- got = ld.Linklookup(ctxt, ".got", 0)
+ got := ld.Linklookup(ctxt, ".got", 0)
s.Got = int32(got.Size)
ld.Addaddrplus(ctxt, got, s, 0)
}
func addgotsym(ctxt *ld.Link, s *ld.LSym) {
- var got *ld.LSym
- var rel *ld.LSym
-
if s.Got >= 0 {
return
}
adddynsym(ctxt, s)
- got = ld.Linklookup(ctxt, ".got", 0)
+ got := ld.Linklookup(ctxt, ".got", 0)
s.Got = int32(got.Size)
ld.Adduint32(ctxt, got, 0)
if ld.Iself {
- rel = ld.Linklookup(ctxt, ".rel", 0)
+ rel := ld.Linklookup(ctxt, ".rel", 0)
ld.Addaddrplus(ctxt, rel, got, int64(s.Got))
ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_ARM_GLOB_DAT))
} else {
}
func adddynsym(ctxt *ld.Link, s *ld.LSym) {
- var d *ld.LSym
- var t int
- var name string
-
if s.Dynid >= 0 {
return
}
s.Dynid = int32(ld.Nelfsym)
ld.Nelfsym++
- d = ld.Linklookup(ctxt, ".dynsym", 0)
+ d := ld.Linklookup(ctxt, ".dynsym", 0)
/* name */
- name = s.Extname
+ name := s.Extname
ld.Adduint32(ctxt, d, uint32(ld.Addstring(ld.Linklookup(ctxt, ".dynstr", 0), name)))
ld.Adduint32(ctxt, d, 0)
/* type */
- t = ld.STB_GLOBAL << 4
+ t := ld.STB_GLOBAL << 4
if (s.Cgoexport&ld.CgoExportDynamic != 0) && s.Type&ld.SMASK == ld.STEXT {
t |= ld.STT_FUNC
}
func adddynlib(lib string) {
- var s *ld.LSym
-
if needlib(lib) == 0 {
return
}
if ld.Iself {
- s = ld.Linklookup(ld.Ctxt, ".dynstr", 0)
+ s := ld.Linklookup(ld.Ctxt, ".dynstr", 0)
if s.Size == 0 {
ld.Addstring(s, "")
}
}
func asmb() {
- var symo uint32
- var dwarfoff uint32
- var machlink uint32
- var sect *ld.Section
- var sym *ld.LSym
- var i int
-
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
}
ld.Asmbelfsetup()
}
- sect = ld.Segtext.Sect
+ sect := ld.Segtext.Sect
ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
for sect = sect.Next; sect != nil; sect = sect.Next {
ld.Cseek(int64(ld.Segdata.Fileoff))
ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
- machlink = 0
+ machlink := uint32(0)
if ld.HEADTYPE == ld.Hdarwin {
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime())
}
if ld.Debug['w'] == 0 {
- dwarfoff = uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND)))
+ dwarfoff := uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND)))
ld.Cseek(int64(dwarfoff))
ld.Segdwarf.Fileoff = uint64(ld.Cpos())
ld.Symsize = 0
ld.Lcsize = 0
- symo = 0
+ symo := uint32(0)
if ld.Debug['s'] == 0 {
// TODO: rationalize
if ld.Debug['v'] != 0 {
ld.Asmplan9sym()
ld.Cflush()
- sym = ld.Linklookup(ld.Ctxt, "pclntab", 0)
+ sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
- for i = 0; int32(i) < ld.Lcsize; i++ {
+ for i := 0; int32(i) < ld.Lcsize; i++ {
ld.Cput(uint8(sym.P[i]))
}
}
func archinit() {
- var s *ld.LSym
-
// getgoextlinkenabled is based on GO_EXTLINK_ENABLED when
// Go was built; see ../../make.bash.
if ld.Linkmode == ld.LinkAuto && obj.Getgoextlinkenabled() == "0" {
}
// embed goarm to runtime.goarm
- s = ld.Linklookup(ld.Ctxt, "runtime.goarm", 0)
+ s := ld.Linklookup(ld.Ctxt, "runtime.goarm", 0)
s.Type = ld.SRODATA
ld.Adduint8(ld.Ctxt, s, uint8(ld.Ctxt.Goarm))
gc.Dump("cgen-res", res)
}
- var nl *gc.Node
- var n1 gc.Node
- var nr *gc.Node
- var n2 gc.Node
- var a int
if n == nil || n.Type == nil {
- goto ret
+ return
}
if res == nil || res.Type == nil {
} else {
gc.Cgen_slice(n, res)
}
- goto ret
+ return
case gc.OEFACE:
if res.Op != gc.ONAME || res.Addable == 0 {
} else {
gc.Cgen_eface(n, res)
}
- goto ret
+ return
}
if n.Ullman >= gc.UINF {
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
- goto ret
+ return
}
}
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
sgen(n, res, n.Type.Width)
- goto ret
+ return
}
if res.Addable == 0 {
cgen(&n1, res)
regfree(&n1)
- goto ret
+ return
}
var f int
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
- goto ret
+ return
}
f = 1 // gen thru register
fmt.Printf("%v [ignore previous line]\n", p1)
}
sudoclean()
- goto ret
+ return
}
}
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
- goto ret
+ return
}
// update addressability for string, slice
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
- goto ret
+ return
}
if n.Addable != 0 {
gmove(n, res)
- goto ret
+ return
}
- nl = n.Left
- nr = n.Right
+ nl := n.Left
+ nr := n.Right
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
n2 := *n
n2.Left = &n1
cgen(&n2, res)
- goto ret
+ return
}
}
}
sudoclean()
- goto ret
+ return
}
}
+ var a int
switch n.Op {
default:
gc.Dump("cgen", n)
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
- goto ret
+ return
case gc.OPLUS:
cgen(nl, res)
- goto ret
+ return
// unary
case gc.OCOM:
gins(a, &n2, &n1)
gmove(&n1, res)
regfree(&n1)
- goto ret
+ return
case gc.OMINUS:
if gc.Isfloat[nl.Type.Etype] != 0 {
goto sbop
}
- a = optoas(int(n.Op), nl.Type)
- goto uop
+ a := optoas(int(n.Op), nl.Type)
+ // unary
+ var n1 gc.Node
+ regalloc(&n1, nl.Type, res)
+
+ cgen(nl, &n1)
+ gins(a, nil, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ return
// symmetric binary
case gc.OAND,
gmove(&n2, res)
regfree(&n2)
regfree(&n1)
- goto ret
+ return
}
}
cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
}
- goto ret
+ return
/*
* put simplest on right - we'll generate into left
}
abop: // asymmetric binary
+ var n1 gc.Node
+ var n2 gc.Node
if nl.Ullman >= nr.Ullman {
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
if n2.Op != gc.OLITERAL {
regfree(&n2)
}
- goto ret
-
-uop: // unary
- regalloc(&n1, nl.Type, res)
-
- cgen(nl, &n1)
- gins(a, nil, &n1)
- gmove(&n1, res)
- regfree(&n1)
- goto ret
-
-ret:
+ return
}
/*
n = n.Left
}
- var nl *gc.Node
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
gins(x86.ALEAQ, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
- goto ret
+ return
}
if n.Addable != 0 {
gins(x86.ALEAQ, n, &n1)
gmove(&n1, res)
regfree(&n1)
- goto ret
+ return
}
- nl = n.Left
+ nl := n.Left
switch n.Op {
default:
ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
}
}
-
-ret:
}
/*
gc.Genlist(n.Ninit)
}
- var a int
- var et int
- var nl *gc.Node
- var n1 gc.Node
- var nr *gc.Node
- var n2 gc.Node
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
- goto ret
+ return
}
}
- et = int(n.Type.Etype)
+ et := int(n.Type.Etype)
if et != gc.TBOOL {
gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
gc.Patch(gins(obj.AEND, nil, nil), to)
- goto ret
+ return
}
- nr = nil
+ nr := (*gc.Node)(nil)
for n.Op == gc.OCONVNOP {
n = n.Left
}
}
+ var nl *gc.Node
switch n.Op {
default:
goto def
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(obj.AJMP, nil, likely), to)
}
- goto ret
+ return
case gc.ONAME:
if n.Addable == 0 {
a = x86.AJEQ
}
gc.Patch(gc.Gbranch(a, n.Type, likely), to)
- goto ret
+ return
case gc.OANDAND,
gc.OOROR:
bgen(n.Right, true_, likely, to)
}
- goto ret
+ return
case gc.OEQ,
gc.ONE,
gc.OGE:
nr = n.Right
if nr == nil || nr.Type == nil {
- goto ret
+ return
}
fallthrough
nl = n.Left
if nl == nil || nl.Type == nil {
- goto ret
+ return
}
}
switch n.Op {
case gc.ONOT:
bgen(nl, !true_, likely, to)
- goto ret
+ return
case gc.OEQ,
gc.ONE,
n.Ninit = ll
gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
gc.Patch(p2, gc.Pc)
- goto ret
+ return
}
a = gc.Brcom(a)
regfree(&n2)
}
- goto ret
+ return
def:
+ var n1 gc.Node
regalloc(&n1, n.Type, nil)
cgen(n, &n1)
+ var n2 gc.Node
gc.Nodconst(&n2, n.Type, 0)
gins(optoas(gc.OCMP, n.Type), &n1, &n2)
- a = x86.AJNE
+ a := x86.AJNE
if !true_ {
a = x86.AJEQ
}
gc.Patch(gc.Gbranch(a, n.Type, likely), to)
regfree(&n1)
- goto ret
-
-ret:
+ return
}
/*
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var cx gc.Node
- var oldcx gc.Node
- var rcx int
- var tcount *gc.Type
-
a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
}
gmove(&n1, res)
regfree(&n1)
- goto ret
+ return
}
if nl.Ullman >= gc.UINF {
nr = &n5
}
- rcx = int(reg[x86.REG_CX])
+ rcx := int(reg[x86.REG_CX])
+ var n1 gc.Node
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
// Allow either uint32 or uint64 as shift type,
// to avoid unnecessary conversion from uint32 to uint64
// just to do the comparison.
- tcount = gc.Types[gc.Simtype[nr.Type.Etype]]
+ tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
if tcount.Etype < gc.TUINT32 {
tcount = gc.Types[gc.TUINT32]
}
regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
- regalloc(&n3, tcount, &n1) // to clear high bits of CX
+ var n3 gc.Node
+ regalloc(&n3, tcount, &n1) // to clear high bits of CX
+ var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
- oldcx = gc.Node{}
+ oldcx := gc.Node{}
if rcx > 0 && !gc.Samereg(&cx, res) {
regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
gmove(&cx, &oldcx)
cx.Type = tcount
+ var n2 gc.Node
if gc.Samereg(&cx, res) {
regalloc(&n2, nl.Type, nil)
} else {
regfree(&n1)
regfree(&n2)
-
-ret:
}
/*
*a = obj.Addr{}
- var o int
- var n1 gc.Node
- var oary [10]int64
- var nn *gc.Node
- var reg *gc.Node
- var reg1 *gc.Node
switch n.Op {
case gc.OLITERAL:
if !gc.Isconst(n, gc.CTINT) {
if v >= 32000 || v <= -32000 {
break
}
- goto lit
+ switch as {
+ default:
+ return false
+
+ case x86.AADDB,
+ x86.AADDW,
+ x86.AADDL,
+ x86.AADDQ,
+ x86.ASUBB,
+ x86.ASUBW,
+ x86.ASUBL,
+ x86.ASUBQ,
+ x86.AANDB,
+ x86.AANDW,
+ x86.AANDL,
+ x86.AANDQ,
+ x86.AORB,
+ x86.AORW,
+ x86.AORL,
+ x86.AORQ,
+ x86.AXORB,
+ x86.AXORW,
+ x86.AXORL,
+ x86.AXORQ,
+ x86.AINCB,
+ x86.AINCW,
+ x86.AINCL,
+ x86.AINCQ,
+ x86.ADECB,
+ x86.ADECW,
+ x86.ADECL,
+ x86.ADECQ,
+ x86.AMOVB,
+ x86.AMOVW,
+ x86.AMOVL,
+ x86.AMOVQ:
+ break
+ }
+
+ cleani += 2
+ reg := &clean[cleani-1]
+ reg1 := &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ gc.Naddr(n, a, 1)
+ return true
case gc.ODOT,
gc.ODOTPTR:
cleani += 2
- reg = &clean[cleani-1]
+ reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
- goto odot
-
- case gc.OINDEX:
- return false
- }
-
- return false
+ var nn *gc.Node
+ var oary [10]int64
+ o := gc.Dotoffset(n, oary[:], &nn)
+ if nn == nil {
+ sudoclean()
+ return false
+ }
-lit:
- switch as {
- default:
- return false
+ if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
+ // directly addressable set of DOTs
+ n1 := *nn
- case x86.AADDB,
- x86.AADDW,
- x86.AADDL,
- x86.AADDQ,
- x86.ASUBB,
- x86.ASUBW,
- x86.ASUBL,
- x86.ASUBQ,
- x86.AANDB,
- x86.AANDW,
- x86.AANDL,
- x86.AANDQ,
- x86.AORB,
- x86.AORW,
- x86.AORL,
- x86.AORQ,
- x86.AXORB,
- x86.AXORW,
- x86.AXORL,
- x86.AXORQ,
- x86.AINCB,
- x86.AINCW,
- x86.AINCL,
- x86.AINCQ,
- x86.ADECB,
- x86.ADECW,
- x86.ADECL,
- x86.ADECQ,
- x86.AMOVB,
- x86.AMOVW,
- x86.AMOVL,
- x86.AMOVQ:
- break
- }
+ n1.Type = n.Type
+ n1.Xoffset += oary[0]
+ gc.Naddr(&n1, a, 1)
+ return true
+ }
- cleani += 2
- reg = &clean[cleani-1]
- reg1 = &clean[cleani-2]
- reg.Op = gc.OEMPTY
- reg1.Op = gc.OEMPTY
- gc.Naddr(n, a, 1)
- goto yes
-
-odot:
- o = gc.Dotoffset(n, oary[:], &nn)
- if nn == nil {
- goto no
- }
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ n1 := *reg
+ n1.Op = gc.OINDREG
+ if oary[0] >= 0 {
+ agen(nn, reg)
+ n1.Xoffset = oary[0]
+ } else {
+ cgen(nn, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[0] + 1)
+ }
- if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
- // directly addressable set of DOTs
- n1 := *nn
+ for i := 1; i < o; i++ {
+ if oary[i] >= 0 {
+ gc.Fatal("can't happen")
+ }
+ gins(movptr, &n1, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[i] + 1)
+ }
- n1.Type = n.Type
- n1.Xoffset += oary[0]
+ a.Type = obj.TYPE_NONE
+ a.Index = obj.TYPE_NONE
+ fixlargeoffset(&n1)
gc.Naddr(&n1, a, 1)
- goto yes
- }
-
- regalloc(reg, gc.Types[gc.Tptr], nil)
- n1 = *reg
- n1.Op = gc.OINDREG
- if oary[0] >= 0 {
- agen(nn, reg)
- n1.Xoffset = oary[0]
- } else {
- cgen(nn, reg)
- gc.Cgen_checknil(reg)
- n1.Xoffset = -(oary[0] + 1)
- }
+ return true
- for i := 1; i < o; i++ {
- if oary[i] >= 0 {
- gc.Fatal("can't happen")
- }
- gins(movptr, &n1, reg)
- gc.Cgen_checknil(reg)
- n1.Xoffset = -(oary[i] + 1)
+ case gc.OINDEX:
+ return false
}
- a.Type = obj.TYPE_NONE
- a.Index = obj.TYPE_NONE
- fixlargeoffset(&n1)
- gc.Naddr(&n1, a, 1)
- goto yes
-
-yes:
- return true
-
-no:
- sudoclean()
return false
}
}
var info gc.ProgInfo
- var r *gc.Flow
- for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\t? %v\n", r.Prog)
}
}
if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
- goto gotit
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t := int(int(v1.Reg))
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return true
}
if copyau(&p.From, v2) || copyau(&p.To, v2) {
fmt.Printf("\tran off end; return 0\n")
}
return false
-
-gotit:
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
- if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
- fmt.Printf(" excise")
- }
- fmt.Printf("\n")
- }
-
- for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
- p = r.Prog
- copysub(&p.From, v1, v2, 1)
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v\n", r.Prog)
- }
- }
-
- t := int(int(v1.Reg))
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v last\n", r.Prog)
- }
- return true
}
/*
var zeroes string
func needlib(name string) int {
- var p string
- var s *ld.LSym
-
if name[0] == '\x00' {
return 0
}
/* reuse hash code in symbol table */
- p = fmt.Sprintf(".elfload.%s", name)
+ p := fmt.Sprintf(".elfload.%s", name)
- s = ld.Linklookup(ld.Ctxt, p, 0)
+ s := ld.Linklookup(ld.Ctxt, p, 0)
if s.Type == 0 {
s.Type = 100 // avoid SDATA, etc.
}
func adddynrel(s *ld.LSym, r *ld.Reloc) {
- var targ *ld.LSym
- var rela *ld.LSym
- var got *ld.LSym
-
- targ = r.Sym
+ targ := r.Sym
ld.Ctxt.Cursym = s
switch r.Type {
}
if ld.Iself {
adddynsym(ld.Ctxt, targ)
- rela = ld.Linklookup(ld.Ctxt, ".rela", 0)
+ rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
ld.Addaddrplus(ld.Ctxt, rela, s, int64(r.Off))
if r.Siz == 8 {
ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_X86_64_64))
// but we only need to support cgo and that's all it needs.
adddynsym(ld.Ctxt, targ)
- got = ld.Linklookup(ld.Ctxt, ".got", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got", 0)
s.Type = got.Type | ld.SSUB
s.Outer = got
s.Sub = got.Sub
}
func elfreloc1(r *ld.Reloc, sectoff int64) int {
- var elfsym int32
-
ld.Thearch.Vput(uint64(sectoff))
- elfsym = r.Xsym.Elfsym
+ elfsym := r.Xsym.Elfsym
switch r.Type {
default:
return -1
func machoreloc1(r *ld.Reloc, sectoff int64) int {
var v uint32
- var rs *ld.LSym
- rs = r.Xsym
+ rs := r.Xsym
if rs.Type == ld.SHOSTOBJ || r.Type == ld.R_PCREL {
if rs.Dynid < 0 {
}
func elfsetupplt() {
- var plt *ld.LSym
- var got *ld.LSym
-
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
- got = ld.Linklookup(ld.Ctxt, ".got.plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got.plt", 0)
if plt.Size == 0 {
// pushq got+8(IP)
ld.Adduint8(ld.Ctxt, plt, 0xff)
adddynsym(ld.Ctxt, s)
if ld.Iself {
- var plt *ld.LSym
- var got *ld.LSym
- var rela *ld.LSym
-
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
- got = ld.Linklookup(ld.Ctxt, ".got.plt", 0)
- rela = ld.Linklookup(ld.Ctxt, ".rela.plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got.plt", 0)
+ rela := ld.Linklookup(ld.Ctxt, ".rela.plt", 0)
if plt.Size == 0 {
elfsetupplt()
}
// http://networkpx.blogspot.com/2009/09/about-lcdyldinfoonly-command.html
// has details about what we're avoiding.
- var plt *ld.LSym
-
addgotsym(s)
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
ld.Adduint32(ld.Ctxt, ld.Linklookup(ld.Ctxt, ".linkedit.plt", 0), uint32(s.Dynid))
}
func addgotsym(s *ld.LSym) {
- var got *ld.LSym
- var rela *ld.LSym
-
if s.Got >= 0 {
return
}
adddynsym(ld.Ctxt, s)
- got = ld.Linklookup(ld.Ctxt, ".got", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got", 0)
s.Got = int32(got.Size)
ld.Adduint64(ld.Ctxt, got, 0)
if ld.Iself {
- rela = ld.Linklookup(ld.Ctxt, ".rela", 0)
+ rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
ld.Addaddrplus(ld.Ctxt, rela, got, int64(s.Got))
ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_X86_64_GLOB_DAT))
ld.Adduint64(ld.Ctxt, rela, 0)
}
func adddynsym(ctxt *ld.Link, s *ld.LSym) {
- var d *ld.LSym
- var t int
- var name string
-
if s.Dynid >= 0 {
return
}
s.Dynid = int32(ld.Nelfsym)
ld.Nelfsym++
- d = ld.Linklookup(ctxt, ".dynsym", 0)
+ d := ld.Linklookup(ctxt, ".dynsym", 0)
- name = s.Extname
+ name := s.Extname
ld.Adduint32(ctxt, d, uint32(ld.Addstring(ld.Linklookup(ctxt, ".dynstr", 0), name)))
/* type */
- t = ld.STB_GLOBAL << 4
+ t := ld.STB_GLOBAL << 4
if s.Cgoexport != 0 && s.Type&ld.SMASK == ld.STEXT {
t |= ld.STT_FUNC
}
func adddynlib(lib string) {
- var s *ld.LSym
-
if needlib(lib) == 0 {
return
}
if ld.Iself {
- s = ld.Linklookup(ld.Ctxt, ".dynstr", 0)
+ s := ld.Linklookup(ld.Ctxt, ".dynstr", 0)
if s.Size == 0 {
ld.Addstring(s, "")
}
}
func asmb() {
- var magic int32
- var i int
- var vl int64
- var symo int64
- var dwarfoff int64
- var machlink int64
- var sect *ld.Section
- var sym *ld.LSym
-
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
}
ld.Asmbelfsetup()
}
- sect = ld.Segtext.Sect
+ sect := ld.Segtext.Sect
ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
for sect = sect.Next; sect != nil; sect = sect.Next {
ld.Cseek(int64(ld.Segdata.Fileoff))
ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
- machlink = 0
+ machlink := int64(0)
if ld.HEADTYPE == ld.Hdarwin {
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime())
}
- dwarfoff = ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND))
+ dwarfoff := ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND))
ld.Cseek(dwarfoff)
ld.Segdwarf.Fileoff = uint64(ld.Cpos())
ld.Symsize = 0
ld.Spsize = 0
ld.Lcsize = 0
- symo = 0
+ symo := int64(0)
if ld.Debug['s'] == 0 {
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f sym\n", obj.Cputime())
ld.Asmplan9sym()
ld.Cflush()
- sym = ld.Linklookup(ld.Ctxt, "pclntab", 0)
+ sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
- for i = 0; int32(i) < ld.Lcsize; i++ {
+ for i := 0; int32(i) < ld.Lcsize; i++ {
ld.Cput(uint8(sym.P[i]))
}
switch ld.HEADTYPE {
default:
case ld.Hplan9: /* plan9 */
- magic = 4*26*26 + 7
+ magic := int32(4*26*26 + 7)
magic |= 0x00008000 /* fat header */
ld.Lputb(uint32(magic)) /* magic */
ld.Lputb(uint32(ld.Segdata.Filelen))
ld.Lputb(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
ld.Lputb(uint32(ld.Symsize)) /* nsyms */
- vl = ld.Entryvalue()
+ vl := ld.Entryvalue()
ld.Lputb(PADDR(uint32(vl))) /* va of entry */
ld.Lputb(uint32(ld.Spsize)) /* sp offsets */
ld.Lputb(uint32(ld.Lcsize)) /* line offsets */
case gc.OMINUS,
gc.OCOM:
- a = optoas(int(n.Op), nl.Type)
- goto uop
+ a := optoas(int(n.Op), nl.Type)
+ // unary
+ var n1 gc.Node
+ gc.Tempname(&n1, nl.Type)
+
+ cgen(nl, &n1)
+ gins(a, nil, &n1)
+ gmove(&n1, res)
+ return
// symmetric binary
case gc.OAND,
break
}
- goto sbop
+ // symmetric binary
+ if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
+ r := nl
+ nl = nr
+ nr = r
+ }
+ goto abop
// asymmetric binary
case gc.OSUB:
return
-sbop: // symmetric binary
- if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
- r := nl
- nl = nr
- nr = r
- }
-
abop: // asymmetric binary
if gc.Smallintconst(nr) {
var n1 gc.Node
}
return
-
-uop: // unary
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
-
- cgen(nl, &n1)
- gins(a, nil, &n1)
- gmove(&n1, res)
- return
}
/*
gins(i386.AMOVL, ncon(0), &lo2)
gins(i386.AMOVL, ncon(0), &hi2)
splitclean()
- goto out
+ return
}
if v >= 32 {
gins(i386.AMOVL, ncon(0), &lo2)
splitclean()
splitclean()
- goto out
+ return
}
// general shift
}
splitclean()
- goto out
+ return
}
if v >= 32 {
}
splitclean()
splitclean()
- goto out
+ return
}
// general shift
splitclean()
splitclean()
- goto out
+ return
}
gins(i386.AMOVL, &lo1, &ax)
gins(i386.AMOVL, &ax, &lo1)
gins(i386.AMOVL, &dx, &hi1)
splitclean()
-
-out:
}
/*
gc.Nodreg(&f0, nl.Type, i386.REG_F0)
gc.Nodreg(&f1, n.Type, i386.REG_F0+1)
if nr != nil {
- goto flt2
+ // binary
+ if nl.Ullman >= nr.Ullman {
+ cgen(nl, &f0)
+ if nr.Addable != 0 {
+ gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
+ } else {
+ cgen(nr, &f0)
+ gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
+ }
+ } else {
+ cgen(nr, &f0)
+ if nl.Addable != 0 {
+ gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
+ } else {
+ cgen(nl, &f0)
+ gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
+ }
+ }
+
+ gmove(&f0, res)
+ return
}
// unary
}
gmove(&f0, res)
return
-
-flt2: // binary
- if nl.Ullman >= nr.Ullman {
- cgen(nl, &f0)
- if nr.Addable != 0 {
- gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
- } else {
- cgen(nr, &f0)
- gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
- }
- } else {
- cgen(nr, &f0)
- if nl.Addable != 0 {
- gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
- } else {
- cgen(nl, &f0)
- gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
- }
- }
-
- gmove(&f0, res)
- return
}
func cgen_floatsse(n *gc.Node, res *gc.Node) {
var n2 gc.Node
var ax gc.Node
if gc.Use_sse != 0 {
- goto sse
+ if nl.Addable == 0 {
+ var n1 gc.Node
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ nl = &n1
+ }
+
+ if nr.Addable == 0 {
+ var tmp gc.Node
+ gc.Tempname(&tmp, nr.Type)
+ cgen(nr, &tmp)
+ nr = &tmp
+ }
+
+ var n2 gc.Node
+ regalloc(&n2, nr.Type, nil)
+ gmove(nr, &n2)
+ nr = &n2
+
+ if nl.Op != gc.OREGISTER {
+ var n3 gc.Node
+ regalloc(&n3, nl.Type, nil)
+ gmove(nl, &n3)
+ nl = &n3
+ }
+
+ if a == gc.OGE || a == gc.OGT {
+ // only < and <= work right with NaN; reverse if needed
+ r := nr
+
+ nr = nl
+ nl = r
+ a = gc.Brrev(a)
+ }
+
+ gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
+ if nl.Op == gc.OREGISTER {
+ regfree(nl)
+ }
+ regfree(nr)
+ goto ret
} else {
goto x87
}
goto ret
-sse:
- if nl.Addable == 0 {
- var n1 gc.Node
- gc.Tempname(&n1, nl.Type)
- cgen(nl, &n1)
- nl = &n1
- }
-
- if nr.Addable == 0 {
- var tmp gc.Node
- gc.Tempname(&tmp, nr.Type)
- cgen(nr, &tmp)
- nr = &tmp
- }
-
- regalloc(&n2, nr.Type, nil)
- gmove(nr, &n2)
- nr = &n2
-
- if nl.Op != gc.OREGISTER {
- var n3 gc.Node
- regalloc(&n3, nl.Type, nil)
- gmove(nl, &n3)
- nl = &n3
- }
-
- if a == gc.OGE || a == gc.OGT {
- // only < and <= work right with NaN; reverse if needed
- r := nr
-
- nr = nl
- nl = r
- a = gc.Brrev(a)
- }
-
- gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
- if nl.Op == gc.OREGISTER {
- regfree(nl)
- }
- regfree(nr)
-
ret:
if a == gc.OEQ {
// neither NE nor P
et := int(gc.Simtype[t.Etype])
if gc.Use_sse != 0 {
- goto sse
+ switch uint32(op)<<16 | uint32(et) {
+ default:
+ gc.Fatal("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+
+ case gc.OCMP<<16 | gc.TFLOAT32:
+ a = i386.AUCOMISS
+
+ case gc.OCMP<<16 | gc.TFLOAT64:
+ a = i386.AUCOMISD
+
+ case gc.OAS<<16 | gc.TFLOAT32:
+ a = i386.AMOVSS
+
+ case gc.OAS<<16 | gc.TFLOAT64:
+ a = i386.AMOVSD
+
+ case gc.OADD<<16 | gc.TFLOAT32:
+ a = i386.AADDSS
+
+ case gc.OADD<<16 | gc.TFLOAT64:
+ a = i386.AADDSD
+
+ case gc.OSUB<<16 | gc.TFLOAT32:
+ a = i386.ASUBSS
+
+ case gc.OSUB<<16 | gc.TFLOAT64:
+ a = i386.ASUBSD
+
+ case gc.OMUL<<16 | gc.TFLOAT32:
+ a = i386.AMULSS
+
+ case gc.OMUL<<16 | gc.TFLOAT64:
+ a = i386.AMULSD
+
+ case gc.ODIV<<16 | gc.TFLOAT32:
+ a = i386.ADIVSS
+
+ case gc.ODIV<<16 | gc.TFLOAT64:
+ a = i386.ADIVSD
+ }
+
+ return a
}
// If we need Fpop, it means we're working on
gc.Fatal("foptoas %v %v %#x", gc.Oconv(int(op), 0), gc.Tconv(t, 0), flg)
return 0
-
-sse:
- switch uint32(op)<<16 | uint32(et) {
- default:
- gc.Fatal("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
-
- case gc.OCMP<<16 | gc.TFLOAT32:
- a = i386.AUCOMISS
-
- case gc.OCMP<<16 | gc.TFLOAT64:
- a = i386.AUCOMISD
-
- case gc.OAS<<16 | gc.TFLOAT32:
- a = i386.AMOVSS
-
- case gc.OAS<<16 | gc.TFLOAT64:
- a = i386.AMOVSD
-
- case gc.OADD<<16 | gc.TFLOAT32:
- a = i386.AADDSS
-
- case gc.OADD<<16 | gc.TFLOAT64:
- a = i386.AADDSD
-
- case gc.OSUB<<16 | gc.TFLOAT32:
- a = i386.ASUBSS
-
- case gc.OSUB<<16 | gc.TFLOAT64:
- a = i386.ASUBSD
-
- case gc.OMUL<<16 | gc.TFLOAT32:
- a = i386.AMULSS
-
- case gc.OMUL<<16 | gc.TFLOAT64:
- a = i386.AMULSD
-
- case gc.ODIV<<16 | gc.TFLOAT32:
- a = i386.ADIVSS
-
- case gc.ODIV<<16 | gc.TFLOAT64:
- a = i386.ADIVSD
- }
-
- return a
}
var resvd = []int{
switch uint32(ft)<<16 | uint32(tt) {
default:
- goto fatal
+ // should not happen
+ gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
+ return
/*
* integer copy and truncate
gmove(&r1, t)
regfree(&r1)
return
-
- // should not happen
-fatal:
- gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
}
func floatmove(f *gc.Node, t *gc.Node) {
return false
}
var info gc.ProgInfo
- var r *gc.Flow
- for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\t? %v\n", r.Prog)
}
}
if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
- goto gotit
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t := int(v1.Reg)
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return true
}
if copyau(&p.From, v2) || copyau(&p.To, v2) {
}
return false
-
-gotit:
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
- if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
- fmt.Printf(" excise")
- }
- fmt.Printf("\n")
- }
-
- for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
- p = r.Prog
- copysub(&p.From, v1, v2, 1)
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v\n", r.Prog)
- }
- }
-
- t := int(v1.Reg)
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v last\n", r.Prog)
- }
- return true
}
/*
import "cmd/internal/ld"
func needlib(name string) int {
- var p string
- var s *ld.LSym
-
if name[0] == '\x00' {
return 0
}
/* reuse hash code in symbol table */
- p = fmt.Sprintf(".dynlib.%s", name)
+ p := fmt.Sprintf(".dynlib.%s", name)
- s = ld.Linklookup(ld.Ctxt, p, 0)
+ s := ld.Linklookup(ld.Ctxt, p, 0)
if s.Type == 0 {
s.Type = 100 // avoid SDATA, etc.
}
func adddynrel(s *ld.LSym, r *ld.Reloc) {
- var targ *ld.LSym
- var rel *ld.LSym
- var got *ld.LSym
-
- targ = r.Sym
+ targ := r.Sym
ld.Ctxt.Cursym = s
switch r.Type {
}
if ld.Iself {
adddynsym(ld.Ctxt, targ)
- rel = ld.Linklookup(ld.Ctxt, ".rel", 0)
+ rel := ld.Linklookup(ld.Ctxt, ".rel", 0)
ld.Addaddrplus(ld.Ctxt, rel, s, int64(r.Off))
ld.Adduint32(ld.Ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_386_32))
r.Type = ld.R_CONST // write r->add during relocsym
// but we only need to support cgo and that's all it needs.
adddynsym(ld.Ctxt, targ)
- got = ld.Linklookup(ld.Ctxt, ".got", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got", 0)
s.Type = got.Type | ld.SSUB
s.Outer = got
s.Sub = got.Sub
}
func elfreloc1(r *ld.Reloc, sectoff int64) int {
- var elfsym int32
-
ld.Thearch.Lput(uint32(sectoff))
- elfsym = r.Xsym.Elfsym
+ elfsym := r.Xsym.Elfsym
switch r.Type {
default:
return -1
func machoreloc1(r *ld.Reloc, sectoff int64) int {
var v uint32
- var rs *ld.LSym
- rs = r.Xsym
+ rs := r.Xsym
if rs.Type == ld.SHOSTOBJ {
if rs.Dynid < 0 {
}
func elfsetupplt() {
- var plt *ld.LSym
- var got *ld.LSym
-
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
- got = ld.Linklookup(ld.Ctxt, ".got.plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got.plt", 0)
if plt.Size == 0 {
// pushl got+4
ld.Adduint8(ld.Ctxt, plt, 0xff)
}
func addpltsym(ctxt *ld.Link, s *ld.LSym) {
- var plt *ld.LSym
- var got *ld.LSym
- var rel *ld.LSym
-
if s.Plt >= 0 {
return
}
adddynsym(ctxt, s)
if ld.Iself {
- plt = ld.Linklookup(ctxt, ".plt", 0)
- got = ld.Linklookup(ctxt, ".got.plt", 0)
- rel = ld.Linklookup(ctxt, ".rel.plt", 0)
+ plt := ld.Linklookup(ctxt, ".plt", 0)
+ got := ld.Linklookup(ctxt, ".got.plt", 0)
+ rel := ld.Linklookup(ctxt, ".rel.plt", 0)
if plt.Size == 0 {
elfsetupplt()
}
} else if ld.HEADTYPE == ld.Hdarwin {
// Same laziness as in 6l.
- var plt *ld.LSym
-
- plt = ld.Linklookup(ctxt, ".plt", 0)
+ plt := ld.Linklookup(ctxt, ".plt", 0)
addgotsym(ctxt, s)
}
func addgotsym(ctxt *ld.Link, s *ld.LSym) {
- var got *ld.LSym
- var rel *ld.LSym
-
if s.Got >= 0 {
return
}
adddynsym(ctxt, s)
- got = ld.Linklookup(ctxt, ".got", 0)
+ got := ld.Linklookup(ctxt, ".got", 0)
s.Got = int32(got.Size)
ld.Adduint32(ctxt, got, 0)
if ld.Iself {
- rel = ld.Linklookup(ctxt, ".rel", 0)
+ rel := ld.Linklookup(ctxt, ".rel", 0)
ld.Addaddrplus(ctxt, rel, got, int64(s.Got))
ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_386_GLOB_DAT))
} else if ld.HEADTYPE == ld.Hdarwin {
}
func adddynsym(ctxt *ld.Link, s *ld.LSym) {
- var d *ld.LSym
- var t int
- var name string
-
if s.Dynid >= 0 {
return
}
s.Dynid = int32(ld.Nelfsym)
ld.Nelfsym++
- d = ld.Linklookup(ctxt, ".dynsym", 0)
+ d := ld.Linklookup(ctxt, ".dynsym", 0)
/* name */
- name = s.Extname
+ name := s.Extname
ld.Adduint32(ctxt, d, uint32(ld.Addstring(ld.Linklookup(ctxt, ".dynstr", 0), name)))
ld.Adduint32(ctxt, d, 0)
/* type */
- t = ld.STB_GLOBAL << 4
+ t := ld.STB_GLOBAL << 4
if s.Cgoexport != 0 && s.Type&ld.SMASK == ld.STEXT {
t |= ld.STT_FUNC
}
func adddynlib(lib string) {
- var s *ld.LSym
-
if needlib(lib) == 0 {
return
}
if ld.Iself {
- s = ld.Linklookup(ld.Ctxt, ".dynstr", 0)
+ s := ld.Linklookup(ld.Ctxt, ".dynstr", 0)
if s.Size == 0 {
ld.Addstring(s, "")
}
}
func asmb() {
- var magic int32
- var symo uint32
- var dwarfoff uint32
- var machlink uint32
- var sect *ld.Section
- var sym *ld.LSym
- var i int
-
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
}
ld.Asmbelfsetup()
}
- sect = ld.Segtext.Sect
+ sect := ld.Segtext.Sect
ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
for sect = sect.Next; sect != nil; sect = sect.Next {
ld.Cseek(int64(ld.Segdata.Fileoff))
ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
- machlink = 0
+ machlink := uint32(0)
if ld.HEADTYPE == ld.Hdarwin {
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime())
}
- dwarfoff = uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND)))
+ dwarfoff := uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND)))
ld.Cseek(int64(dwarfoff))
ld.Segdwarf.Fileoff = uint64(ld.Cpos())
ld.Symsize = 0
ld.Spsize = 0
ld.Lcsize = 0
- symo = 0
+ symo := uint32(0)
if ld.Debug['s'] == 0 {
// TODO: rationalize
if ld.Debug['v'] != 0 {
ld.Asmplan9sym()
ld.Cflush()
- sym = ld.Linklookup(ld.Ctxt, "pclntab", 0)
+ sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
- for i = 0; int32(i) < ld.Lcsize; i++ {
+ for i := 0; int32(i) < ld.Lcsize; i++ {
ld.Cput(uint8(sym.P[i]))
}
switch ld.HEADTYPE {
default:
case ld.Hplan9: /* plan9 */
- magic = 4*11*11 + 7
+ magic := int32(4*11*11 + 7)
ld.Lputb(uint32(magic)) /* magic */
ld.Lputb(uint32(ld.Segtext.Filelen)) /* sizes */
gc.Dump("cgen-res", res)
}
- var a int
- var nr *gc.Node
- var nl *gc.Node
- var n1 gc.Node
- var n2 gc.Node
if n == nil || n.Type == nil {
- goto ret
+ return
}
if res == nil || res.Type == nil {
} else {
gc.Cgen_slice(n, res)
}
- goto ret
+ return
case gc.OEFACE:
if res.Op != gc.ONAME || res.Addable == 0 {
} else {
gc.Cgen_eface(n, res)
}
- goto ret
+ return
}
if n.Ullman >= gc.UINF {
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
- goto ret
+ return
}
}
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
sgen(n, res, n.Type.Width)
- goto ret
+ return
}
if res.Addable == 0 {
cgen(&n1, res)
regfree(&n1)
- goto ret
+ return
}
var f int
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
- goto ret
+ return
}
f = 1 // gen thru register
fmt.Printf("%v [ignore previous line]\n", p1)
}
sudoclean()
- goto ret
+ return
}
}
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
- goto ret
+ return
}
// update addressability for string, slice
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
- goto ret
+ return
}
// if both are addressable, move
regfree(&n1)
}
- goto ret
+ return
}
- nl = n.Left
- nr = n.Right
+ nl := n.Left
+ nr := n.Right
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
n2 := *n
n2.Left = &n1
cgen(&n2, res)
- goto ret
+ return
}
}
}
sudoclean()
- goto ret
+ return
}
}
// OGE, OLE, and ONE ourselves.
// if(nl != N && isfloat[n->type->etype] && isfloat[nl->type->etype]) goto flt;
+ var a int
switch n.Op {
default:
gc.Dump("cgen", n)
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
- goto ret
+ return
case gc.OPLUS:
cgen(nl, res)
- goto ret
+ return
// unary
case gc.OCOM:
gins(a, &n2, &n1)
gmove(&n1, res)
regfree(&n1)
- goto ret
+ return
case gc.OMINUS:
if gc.Isfloat[nl.Type.Etype] != 0 {
goto sbop
}
- a = optoas(int(n.Op), nl.Type)
- goto uop
+ a := optoas(int(n.Op), nl.Type)
+ // unary
+ var n1 gc.Node
+ regalloc(&n1, nl.Type, res)
+
+ cgen(nl, &n1)
+ gins(a, nil, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ return
// symmetric binary
case gc.OAND,
gmove(&n2, res)
regfree(&n2)
regfree(&n1)
- goto ret
+ return
}
}
cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
}
- goto ret
+ return
/*
* put simplest on right - we'll generate into left
}
abop: // asymmetric binary
+ var n1 gc.Node
+ var n2 gc.Node
if nl.Ullman >= nr.Ullman {
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
if n2.Op != gc.OLITERAL {
regfree(&n2)
}
- goto ret
-
-uop: // unary
- regalloc(&n1, nl.Type, res)
-
- cgen(nl, &n1)
- gins(a, nil, &n1)
- gmove(&n1, res)
- regfree(&n1)
- goto ret
-
-ret:
+ return
}
/*
n = n.Left
}
- var nl *gc.Node
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
gins(ppc64.AMOVD, &n3, &n2)
gmove(&n2, res)
regfree(&n2)
- goto ret
+ return
}
if n.Addable != 0 {
gins(ppc64.AMOVD, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
- goto ret
+ return
}
- nl = n.Left
+ nl := n.Left
switch n.Op {
default:
ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
}
}
-
-ret:
}
/*
gc.Genlist(n.Ninit)
}
- var et int
- var nl *gc.Node
- var nr *gc.Node
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
- goto ret
+ return
}
}
- et = int(n.Type.Etype)
+ et := int(n.Type.Etype)
if et != gc.TBOOL {
gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
gc.Patch(gins(obj.AEND, nil, nil), to)
- goto ret
+ return
}
- nr = nil
+ nr := (*gc.Node)(nil)
for n.Op == gc.OCONVNOP {
n = n.Left
}
}
+ var nl *gc.Node
switch n.Op {
default:
var n1 gc.Node
}
gc.Patch(gc.Gbranch(a, n.Type, likely), to)
regfree(&n1)
- goto ret
+ return
// need to ask if it is bool?
case gc.OLITERAL:
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(ppc64.ABR, nil, likely), to)
}
- goto ret
+ return
case gc.OANDAND,
gc.OOROR:
bgen(n.Right, true_, likely, to)
}
- goto ret
+ return
case gc.OEQ,
gc.ONE,
gc.OGE:
nr = n.Right
if nr == nil || nr.Type == nil {
- goto ret
+ return
}
fallthrough
nl = n.Left
if nl == nil || nl.Type == nil {
- goto ret
+ return
}
}
switch n.Op {
case gc.ONOT:
bgen(nl, !true_, likely, to)
- goto ret
+ return
case gc.OEQ,
gc.ONE,
n.Ninit = ll
gc.Patch(gc.Gbranch(ppc64.ABR, nil, 0), to)
gc.Patch(p2, gc.Pc)
- goto ret
+ return
}
a = gc.Brcom(a)
regfree(&n2)
}
- goto ret
-
-ret:
+ return
}
/*
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// TODO(minux): enable division by magic multiply (also need to fix longmod below)
//if(nr->op != OLITERAL)
- goto longdiv
-
// division and mod using (slow) hardware instruction
-longdiv:
dodiv(op, nl, nr, res)
return
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- var n1 gc.Node
- var n2 gc.Node
- var n3 gc.Node
- var tcount *gc.Type
-
a := int(optoas(op, nl.Type))
if nr.Op == gc.OLITERAL {
}
gmove(&n1, res)
regfree(&n1)
- goto ret
+ return
}
if nl.Ullman >= gc.UINF {
// Allow either uint32 or uint64 as shift type,
// to avoid unnecessary conversion from uint32 to uint64
// just to do the comparison.
- tcount = gc.Types[gc.Simtype[nr.Type.Etype]]
+ tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
if tcount.Etype < gc.TUINT32 {
tcount = gc.Types[gc.TUINT32]
}
+ var n1 gc.Node
regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
- regalloc(&n3, tcount, &n1) // to clear high bits of CX
+ var n3 gc.Node
+ regalloc(&n3, tcount, &n1) // to clear high bits of CX
+ var n2 gc.Node
regalloc(&n2, nl.Type, res)
if nl.Ullman >= nr.Ullman {
regfree(&n1)
regfree(&n2)
-
-ret:
}
func clearfat(nl *gc.Node) {
agen(nl, &dst)
var boff uint64
- var p *obj.Prog
if q > 128 {
- p = gins(ppc64.ASUB, nil, &dst)
+ p := gins(ppc64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
// The loop leaves R3 on the last zeroed dword
boff = 8
} else if q >= 4 {
- p = gins(ppc64.ASUB, nil, &dst)
+ p := gins(ppc64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
f := (*gc.Node)(gc.Sysfunc("duffzero"))
// duffzero leaves R3 on the last zeroed dword
boff = 8
} else {
+ var p *obj.Prog
for t := uint64(0); t < q; t++ {
p = gins(ppc64.AMOVD, &r0, &dst)
p.To.Type = obj.TYPE_MEM
boff = 8 * q
}
+ var p *obj.Prog
for t := uint64(0); t < c; t++ {
p = gins(ppc64.AMOVB, &r0, &dst)
p.To.Type = obj.TYPE_MEM
if !regtyp(v2) {
return false
}
- var r *gc.Flow
var info gc.ProgInfo
- for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
break
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
if p.To.Type == v1.Type {
if p.To.Reg == v1.Reg {
- goto gotit
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub1(p, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t := int(int(v1.Reg))
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return true
}
}
}
}
return false
-
-gotit:
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
- if p.From.Type == v2.Type {
- fmt.Printf(" excise")
- }
- fmt.Printf("\n")
- }
-
- for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
- p = r.Prog
- copysub(&p.From, v1, v2, 1)
- copysub1(p, v1, v2, 1)
- copysub(&p.To, v1, v2, 1)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v\n", r.Prog)
- }
- }
-
- t := int(int(v1.Reg))
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
- if gc.Debug['P'] != 0 {
- fmt.Printf("%v last\n", r.Prog)
- }
- return true
}
/*
import "cmd/internal/ld"
func needlib(name string) int {
- var p string
- var s *ld.LSym
-
if name[0] == '\x00' {
return 0
}
/* reuse hash code in symbol table */
- p = fmt.Sprintf(".dynlib.%s", name)
+ p := fmt.Sprintf(".dynlib.%s", name)
- s = ld.Linklookup(ld.Ctxt, p, 0)
+ s := ld.Linklookup(ld.Ctxt, p, 0)
if s.Type == 0 {
s.Type = 100 // avoid SDATA, etc.
// Construct a call stub in stub that calls symbol targ via its PLT
// entry.
func gencallstub(abicase int, stub *ld.LSym, targ *ld.LSym) {
- var plt *ld.LSym
- var r *ld.Reloc
-
if abicase != 1 {
// If we see R_PPC64_TOCSAVE or R_PPC64_REL24_NOTOC
// relocations, we'll need to implement cases 2 and 3.
log.Fatalf("gencallstub only implements case 1 calls")
}
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
stub.Type = ld.STEXT
ld.Adduint32(ld.Ctxt, stub, 0xf8410018) // std r2,24(r1)
// Load the function pointer from the PLT.
- r = ld.Addrel(stub)
+ r := ld.Addrel(stub)
r.Off = int32(stub.Size)
r.Sym = plt
}
func adddynrel(s *ld.LSym, r *ld.Reloc) {
- var targ *ld.LSym
- var rela *ld.LSym
-
- targ = r.Sym
+ targ := r.Sym
ld.Ctxt.Cursym = s
switch r.Type {
// These happen in .toc sections
adddynsym(ld.Ctxt, targ)
- rela = ld.Linklookup(ld.Ctxt, ".rela", 0)
+ rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
ld.Addaddrplus(ld.Ctxt, rela, s, int64(r.Off))
ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_PPC64_ADDR64))
ld.Adduint64(ld.Ctxt, rela, uint64(r.Add))
}
func elfsetupplt() {
- var plt *ld.LSym
-
- plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
if plt.Size == 0 {
// The dynamic linker stores the address of the
// dynamic resolver and the DSO identifier in the two
}
func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
- var o1 uint32
- var o2 uint32
- var t int64
-
if ld.Linkmode == ld.LinkExternal {
// TODO(minux): translate R_ADDRPOWER and R_CALLPOWER into standard ELF relocations.
// R_ADDRPOWER corresponds to R_PPC_ADDR16_HA and R_PPC_ADDR16_LO.
// The encoding of the immediate x<<16 + y,
// where x is the low 16 bits of the first instruction and y is the low 16
// bits of the second. Both x and y are signed (int16, not uint16).
- o1 = uint32(r.Add >> 32)
- o2 = uint32(r.Add)
- t = ld.Symaddr(r.Sym)
+ o1 := uint32(r.Add >> 32)
+ o2 := uint32(r.Add)
+ t := ld.Symaddr(r.Sym)
if t < 0 {
ld.Ctxt.Diag("relocation for %s is too big (>=2G): %d", s.Name, ld.Symaddr(r.Sym))
}
case ld.R_CALLPOWER:
// Bits 6 through 29 = (S + A - P) >> 2
+ var o1 uint32
if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = ld.Be32(s.P[r.Off:])
} else {
o1 = ld.Le32(s.P[r.Off:])
}
- t = ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off))
+ t := ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off))
if t&3 != 0 {
ld.Ctxt.Diag("relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t)
}
}
func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 {
- var o1 uint32
switch r.Variant & ld.RV_TYPE_MASK {
default:
ld.Diag("unexpected relocation variant %d", r.Variant)
if r.Variant&ld.RV_CHECK_OVERFLOW != 0 {
// Whether to check for signed or unsigned
// overflow depends on the instruction
+ var o1 uint32
if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = ld.Be32(s.P[r.Off-2:])
} else {
if r.Variant&ld.RV_CHECK_OVERFLOW != 0 {
// Whether to check for signed or unsigned
// overflow depends on the instruction
+ var o1 uint32
if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = ld.Be32(s.P[r.Off-2:])
} else {
return int64(int16(t))
case ld.RV_POWER_DS:
+ var o1 uint32
if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = uint32(ld.Be16(s.P[r.Off:]))
} else {
adddynsym(ctxt, s)
if ld.Iself {
- var plt *ld.LSym
- var rela *ld.LSym
- var glink *ld.LSym
- var r *ld.Reloc
-
- plt = ld.Linklookup(ctxt, ".plt", 0)
- rela = ld.Linklookup(ctxt, ".rela.plt", 0)
+ plt := ld.Linklookup(ctxt, ".plt", 0)
+ rela := ld.Linklookup(ctxt, ".rela.plt", 0)
if plt.Size == 0 {
elfsetupplt()
}
// Create the glink resolver if necessary
- glink = ensureglinkresolver()
+ glink := ensureglinkresolver()
// Write symbol resolver stub (just a branch to the
// glink resolver stub)
- r = ld.Addrel(glink)
+ r := ld.Addrel(glink)
r.Sym = glink
r.Off = int32(glink.Size)
// Generate the glink resolver stub if necessary and return the .glink section
func ensureglinkresolver() *ld.LSym {
- var glink *ld.LSym
- var s *ld.LSym
- var r *ld.Reloc
-
- glink = ld.Linklookup(ld.Ctxt, ".glink", 0)
+ glink := ld.Linklookup(ld.Ctxt, ".glink", 0)
if glink.Size != 0 {
return glink
}
ld.Adduint32(ld.Ctxt, glink, 0x7800f082) // srdi r0,r0,2
// r11 = address of the first byte of the PLT
- r = ld.Addrel(glink)
+ r := ld.Addrel(glink)
r.Off = int32(glink.Size)
r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
// Add DT_PPC64_GLINK .dynamic entry, which points to 32 bytes
// before the first symbol resolver stub.
- s = ld.Linklookup(ld.Ctxt, ".dynamic", 0)
+ s := ld.Linklookup(ld.Ctxt, ".dynamic", 0)
ld.Elfwritedynentsymplus(s, ld.DT_PPC64_GLINK, glink, glink.Size-32)
}
func adddynsym(ctxt *ld.Link, s *ld.LSym) {
- var d *ld.LSym
- var t int
- var name string
-
if s.Dynid >= 0 {
return
}
s.Dynid = int32(ld.Nelfsym)
ld.Nelfsym++
- d = ld.Linklookup(ctxt, ".dynsym", 0)
+ d := ld.Linklookup(ctxt, ".dynsym", 0)
- name = s.Extname
+ name := s.Extname
ld.Adduint32(ctxt, d, uint32(ld.Addstring(ld.Linklookup(ctxt, ".dynstr", 0), name)))
/* type */
- t = ld.STB_GLOBAL << 4
+ t := ld.STB_GLOBAL << 4
if s.Cgoexport != 0 && s.Type&ld.SMASK == ld.STEXT {
t |= ld.STT_FUNC
}
func adddynlib(lib string) {
- var s *ld.LSym
-
if needlib(lib) == 0 {
return
}
if ld.Iself {
- s = ld.Linklookup(ld.Ctxt, ".dynstr", 0)
+ s := ld.Linklookup(ld.Ctxt, ".dynstr", 0)
if s.Size == 0 {
ld.Addstring(s, "")
}
}
func asmb() {
- var symo uint32
- var sect *ld.Section
- var sym *ld.LSym
- var i int
-
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
}
ld.Asmbelfsetup()
}
- sect = ld.Segtext.Sect
+ sect := ld.Segtext.Sect
ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
for sect = sect.Next; sect != nil; sect = sect.Next {
ld.Symsize = 0
ld.Lcsize = 0
- symo = 0
+ symo := uint32(0)
if ld.Debug['s'] == 0 {
// TODO: rationalize
if ld.Debug['v'] != 0 {
ld.Asmplan9sym()
ld.Cflush()
- sym = ld.Linklookup(ld.Ctxt, "pclntab", 0)
+ sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
- for i = 0; int32(i) < ld.Lcsize; i++ {
+ for i := 0; int32(i) < ld.Lcsize; i++ {
ld.Cput(uint8(sym.P[i]))
}
}
func Main() {
- var p string
-
// Allow GOARCH=Thestring or GOARCH=Thestringsuffix,
// but not other values.
- p = obj.Getgoarch()
+ p := obj.Getgoarch()
if !strings.HasPrefix(p, Thestring) {
log.Fatalf("cannot use %cc with GOARCH=%s", Thechar, p)
}
func assemble(file string) int {
- var i int
-
if outfile == "" {
outfile = strings.TrimSuffix(filepath.Base(file), ".s") + "." + string(Thechar)
}
fmt.Fprintf(&obuf, "go object %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion())
fmt.Fprintf(&obuf, "!\n")
+ var i int
for Pass = 1; Pass <= 2; Pass++ {
pinit(file)
for i = 0; i < len(Dlist); i++ {
* common code for all the assemblers
*/
func pragpack() {
-
for getnsc() != '\n' {
-
}
}
func pragvararg() {
for getnsc() != '\n' {
-
}
}
func pragcgo(name string) {
for getnsc() != '\n' {
-
}
}
func pragfpround() {
for getnsc() != '\n' {
-
}
}
func pragtextflag() {
for getnsc() != '\n' {
-
}
}
func pragdataflag() {
for getnsc() != '\n' {
-
}
}
func pragprofile() {
for getnsc() != '\n' {
-
}
}
func pragincomplete() {
for getnsc() != '\n' {
-
}
}
func setinclude(p string) {
- var i int
-
if p == "" {
return
}
- for i = 1; i < len(include); i++ {
+ for i := 1; i < len(include); i++ {
if p == include[i] {
return
}
}
func pushio() {
- var i *Io
-
- i = iostack
+ i := iostack
if i == nil {
Yyerror("botch in pushio")
errorexit()
}
func newio() {
- var i *Io
var pushdepth int = 0
- i = iofree
+ i := iofree
if i == nil {
pushdepth++
if pushdepth > 1000 {
}
func newfile(s string, f *os.File) {
- var i *Io
-
- i = ionext
+ i := ionext
i.Link = iostack
iostack = i
i.F = f
}
func LabelLookup(s *Sym) *Sym {
- var p string
- var lab *Sym
-
if thetext == nil {
s.Labelname = s.Name
return s
}
- p = string(fmt.Sprintf("%s.%s", thetext.Name, s.Name))
- lab = Lookup(p)
+ p := string(fmt.Sprintf("%s.%s", thetext.Name, s.Name))
+ lab := Lookup(p)
lab.Labelname = s.Name
return lab
}
func Yylex(yylval *Yylval) int {
- var c int
var c1 int
var s *Sym
- c = peekc
+ c := peekc
if c != IGN {
peekc = IGN
goto l1
goto aloop
}
if isdigit(c) {
- goto tnum
+ yybuf.Reset()
+ if c != '0' {
+ goto dc
+ }
+ yybuf.WriteByte(byte(c))
+ c = GETC()
+ c1 = 3
+ if c == 'x' || c == 'X' {
+ c1 = 4
+ c = GETC()
+ } else if c < '0' || c > '7' {
+ goto dc
+ }
+ yylval.Lval = 0
+ for {
+ if c >= '0' && c <= '9' {
+ if c > '7' && c1 == 3 {
+ break
+ }
+ yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
+ yylval.Lval += int64(c) - '0'
+ c = GETC()
+ continue
+ }
+
+ if c1 == 3 {
+ break
+ }
+ if c >= 'A' && c <= 'F' {
+ c += 'a' - 'A'
+ }
+ if c >= 'a' && c <= 'f' {
+ yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
+ yylval.Lval += int64(c) - 'a' + 10
+ c = GETC()
+ continue
+ }
+
+ break
+ }
+
+ goto ncu
}
switch c {
case '\n':
yylval.Sval = last
return int(s.Type)
-tnum:
- yybuf.Reset()
- if c != '0' {
- goto dc
- }
- yybuf.WriteByte(byte(c))
- c = GETC()
- c1 = 3
- if c == 'x' || c == 'X' {
- c1 = 4
- c = GETC()
- } else if c < '0' || c > '7' {
- goto dc
- }
- yylval.Lval = 0
- for {
- if c >= '0' && c <= '9' {
- if c > '7' && c1 == 3 {
- break
- }
- yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
- yylval.Lval += int64(c) - '0'
- c = GETC()
- continue
- }
-
- if c1 == 3 {
- break
- }
- if c >= 'A' && c <= 'F' {
- c += 'a' - 'A'
- }
- if c >= 'a' && c <= 'f' {
- yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
- yylval.Lval += int64(c) - 'a' + 10
- c = GETC()
- continue
- }
-
- break
- }
-
- goto ncu
-
dc:
for {
if !(isdigit(c)) {
}
func getc() int {
- var c int
-
- c = peekc
+ c := peekc
if c != IGN {
peekc = IGN
if c == '\n' {
}
func escchar(e int) int {
- var c int
var l int
loop:
- c = getc()
+ c := getc()
if c == '\n' {
Yyerror("newline in string")
return EOF
}
func filbuf() int {
- var i *Io
var n int
loop:
- i = iostack
+ i := iostack
if i == nil {
return EOF
}
}
func GETC() int {
- var c int
if len(fi.P) == 0 {
return filbuf()
}
- c = int(fi.P[0])
+ c := int(fi.P[0])
fi.P = fi.P[1:]
return c
}
)
func getnsn() int32 {
- var n int32
- var c int
-
- c = getnsc()
+ c := getnsc()
if c < '0' || c > '9' {
return -1
}
- n = 0
+ n := int32(0)
for c >= '0' && c <= '9' {
n = n*10 + int32(c) - '0'
c = getc()
}
func getsym() *Sym {
- var c int
-
- c = getnsc()
+ c := getnsc()
if !isalpha(c) && c != '_' && c < 0x80 {
unget(c)
return nil
}
func getsymdots(dots *int) *Sym {
- var c int
- var s *Sym
-
- s = getsym()
+ s := getsym()
if s != nil {
return s
}
- c = getnsc()
+ c := getnsc()
if c != '.' {
unget(c)
return nil
func dodefine(cp string) {
var s *Sym
- var p string
if i := strings.Index(cp, "="); i >= 0 {
- p = cp[i+1:]
+ p := cp[i+1:]
cp = cp[:i]
s = Lookup(cp)
s.Macro = &Macro{Text: p}
}
func domacro() {
- var i int
- var s *Sym
-
- s = getsym()
+ s := getsym()
if s == nil {
s = Lookup("endif")
}
- for i = 0; i < len(mactab); i++ {
+ for i := 0; i < len(mactab); i++ {
if s.Name == mactab[i].Macname {
if mactab[i].Macf != nil {
mactab[i].Macf()
}
func macund() {
- var s *Sym
-
- s = getsym()
+ s := getsym()
macend()
if s == nil {
Yyerror("syntax in #undef")
)
func macdef() {
- var s *Sym
- var a *Sym
var args [NARG]string
var n int
var i int
var ischr int
var base bytes.Buffer
- s = getsym()
+ s := getsym()
if s == nil {
goto bad
}
c = getnsc()
if c != ')' {
unget(c)
+ var a *Sym
+ var c int
for {
a = getsymdots(&dots)
if a == nil {
ischr = 0
}
} else {
-
if c == '"' || c == '\'' {
base.WriteByte(byte(c))
ischr = c
if s == nil {
Yyerror("syntax in #define")
} else {
-
Yyerror("syntax in #define: %s", s.Name)
}
macend()
}
func macexpand(s *Sym) []byte {
- var l int
- var c int
- var arg []string
- var out bytes.Buffer
- var buf bytes.Buffer
- var cp string
-
if s.Macro.Narg == 0 {
if debug['m'] != 0 {
fmt.Printf("#expand %s %s\n", s.Name, s.Macro.Text)
nargs := s.Macro.Narg - 1
dots := s.Macro.Dots
- c = getnsc()
+ c := getnsc()
+ var arg []string
+ var cp string
+ var out bytes.Buffer
if c != '(' {
goto bad
}
c = getc()
if c != ')' {
unget(c)
- l = 0
+ l := 0
+ var buf bytes.Buffer
+ var c int
for {
c = getc()
if c == '"' {
}
func macinc() {
- var c0 int
var c int
- var i int
var buf bytes.Buffer
var f *os.File
var hp string
var str string
var symb string
- c0 = getnsc()
+ c0 := getnsc()
if c0 != '"' {
c = c0
if c0 != '<' {
goto bad
}
- for i = 0; i < len(include); i++ {
+ for i := 0; i < len(include); i++ {
if i == 0 && c0 == '>' {
continue
}
}
func maclin() {
- var c int
- var n int32
var buf bytes.Buffer
var symb string
- n = getnsn()
- c = getc()
+ n := getnsn()
+ c := getc()
if n < 0 {
goto bad
}
}
func macprag() {
- var s *Sym
- var c0 int
var c int
- var buf bytes.Buffer
- var symb string
- s = getsym()
+ s := getsym()
if s != nil && s.Name == "lib" {
- goto praglib
+ c0 := getnsc()
+ if c0 != '"' {
+ c = c0
+ if c0 != '<' {
+ goto bad
+ }
+ c0 = '>'
+ }
+
+ var buf bytes.Buffer
+ for {
+ c = getc()
+ if c == c0 {
+ break
+ }
+ if c == '\n' {
+ goto bad
+ }
+ buf.WriteByte(byte(c))
+ }
+ symb := buf.String()
+
+ c = getcom()
+ if c != '\n' {
+ goto bad
+ }
+
+ /*
+ * put pragma-line in as a funny history
+ */
+ obj.Linklinehist(Ctxt, int(Lineno), symb, -1)
+ return
}
if s != nil && s.Name == "pack" {
pragpack()
}
for getnsc() != '\n' {
-
- }
- return
-
-praglib:
- c0 = getnsc()
- if c0 != '"' {
- c = c0
- if c0 != '<' {
- goto bad
- }
- c0 = '>'
- }
-
- for {
- c = getc()
- if c == c0 {
- break
- }
- if c == '\n' {
- goto bad
- }
- buf.WriteByte(byte(c))
- }
- symb = buf.String()
-
- c = getcom()
- if c != '\n' {
- goto bad
}
-
- /*
- * put pragma-line in as a funny history
- */
- obj.Linklinehist(Ctxt, int(Lineno), symb, -1)
return
bad:
var v Val
var norig *Node
if nr == nil {
- goto unary
+ // copy numeric value to avoid modifying
+ // nl, in case someone still refers to it (e.g. iota).
+ v = nl.Val
+
+ if wl == TIDEAL {
+ v = copyval(v)
+ }
+
+ switch uint32(n.Op)<<16 | uint32(v.Ctype) {
+ default:
+ if n.Diag == 0 {
+ Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), Tconv(nl.Type, 0))
+ n.Diag = 1
+ }
+
+ return
+
+ case OCONV<<16 | CTNIL,
+ OARRAYBYTESTR<<16 | CTNIL:
+ if n.Type.Etype == TSTRING {
+ v = tostr(v)
+ nl.Type = n.Type
+ break
+ }
+ fallthrough
+
+ // fall through
+ case OCONV<<16 | CTINT,
+ OCONV<<16 | CTRUNE,
+ OCONV<<16 | CTFLT,
+ OCONV<<16 | CTSTR:
+ convlit1(&nl, n.Type, true)
+
+ v = nl.Val
+
+ case OPLUS<<16 | CTINT,
+ OPLUS<<16 | CTRUNE:
+ break
+
+ case OMINUS<<16 | CTINT,
+ OMINUS<<16 | CTRUNE:
+ mpnegfix(v.U.Xval)
+
+ case OCOM<<16 | CTINT,
+ OCOM<<16 | CTRUNE:
+ et := Txxx
+ if nl.Type != nil {
+ et = int(nl.Type.Etype)
+ }
+
+ // calculate the mask in b
+ // result will be (a ^ mask)
+ var b Mpint
+ switch et {
+ // signed guys change sign
+ default:
+ Mpmovecfix(&b, -1)
+
+ // unsigned guys invert their bits
+ case TUINT8,
+ TUINT16,
+ TUINT32,
+ TUINT64,
+ TUINT,
+ TUINTPTR:
+ mpmovefixfix(&b, Maxintval[et])
+ }
+
+ mpxorfixfix(v.U.Xval, &b)
+
+ case OPLUS<<16 | CTFLT:
+ break
+
+ case OMINUS<<16 | CTFLT:
+ mpnegflt(v.U.Fval)
+
+ case OPLUS<<16 | CTCPLX:
+ break
+
+ case OMINUS<<16 | CTCPLX:
+ mpnegflt(&v.U.Cval.Real)
+ mpnegflt(&v.U.Cval.Imag)
+
+ case ONOT<<16 | CTBOOL:
+ if v.U.Bval == 0 {
+ goto settrue
+ }
+ goto setfalse
+ }
+ goto ret
}
if nr.Type == nil {
return
goto ret
- // copy numeric value to avoid modifying
- // nl, in case someone still refers to it (e.g. iota).
-unary:
- v = nl.Val
-
- if wl == TIDEAL {
- v = copyval(v)
- }
-
- switch uint32(n.Op)<<16 | uint32(v.Ctype) {
- default:
- if n.Diag == 0 {
- Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), Tconv(nl.Type, 0))
- n.Diag = 1
- }
-
- return
-
- case OCONV<<16 | CTNIL,
- OARRAYBYTESTR<<16 | CTNIL:
- if n.Type.Etype == TSTRING {
- v = tostr(v)
- nl.Type = n.Type
- break
- }
- fallthrough
-
- // fall through
- case OCONV<<16 | CTINT,
- OCONV<<16 | CTRUNE,
- OCONV<<16 | CTFLT,
- OCONV<<16 | CTSTR:
- convlit1(&nl, n.Type, true)
-
- v = nl.Val
-
- case OPLUS<<16 | CTINT,
- OPLUS<<16 | CTRUNE:
- break
-
- case OMINUS<<16 | CTINT,
- OMINUS<<16 | CTRUNE:
- mpnegfix(v.U.Xval)
-
- case OCOM<<16 | CTINT,
- OCOM<<16 | CTRUNE:
- et := Txxx
- if nl.Type != nil {
- et = int(nl.Type.Etype)
- }
-
- // calculate the mask in b
- // result will be (a ^ mask)
- var b Mpint
- switch et {
- // signed guys change sign
- default:
- Mpmovecfix(&b, -1)
-
- // unsigned guys invert their bits
- case TUINT8,
- TUINT16,
- TUINT32,
- TUINT64,
- TUINT,
- TUINTPTR:
- mpmovefixfix(&b, Maxintval[et])
- }
-
- mpxorfixfix(v.U.Xval, &b)
-
- case OPLUS<<16 | CTFLT:
- break
-
- case OMINUS<<16 | CTFLT:
- mpnegflt(v.U.Fval)
-
- case OPLUS<<16 | CTCPLX:
- break
-
- case OMINUS<<16 | CTCPLX:
- mpnegflt(&v.U.Cval.Real)
- mpnegflt(&v.U.Cval.Imag)
-
- case ONOT<<16 | CTBOOL:
- if v.U.Bval == 0 {
- goto settrue
- }
- goto setfalse
- }
-
ret:
norig = saveorig(n)
*n = *nl
}
if n.Op == OREAL || n.Op == OIMAG {
- goto yes
+ //dump("\ncomplex-yes", n);
+ return true
}
- goto no
+ //dump("\ncomplex-no", n);
+ return false
maybe:
switch n.Op {
OCOMPLEX,
OREAL,
OIMAG:
- goto yes
+ //dump("\ncomplex-yes", n);
+ return true
case ODOT,
ODOTPTR,
OINDEX,
OIND,
ONAME:
- goto yes
+ //dump("\ncomplex-yes", n);
+ return true
}
//dump("\ncomplex-no", n);
-no:
return false
-
- //dump("\ncomplex-yes", n);
-yes:
- return true
}
func Complexmove(f *Node, t *Node) {
var _yylex_lstk *Loophack
func _yylex(yylval *yySymType) int32 {
- var c int
var c1 int
var escflag int
var v int64
prevlineno = lineno
l0:
- c = getc()
+ c := getc()
if yy_isspace(c) {
if c == '\n' && curio.nlsemi != 0 {
ungetc(c)
}
if yy_isdigit(c) {
- goto tnum
+ cp = &lexbuf
+ cp.Reset()
+ if c != '0' {
+ for {
+ cp.WriteByte(byte(c))
+ c = getc()
+ if yy_isdigit(c) {
+ continue
+ }
+ if c == '.' {
+ goto casedot
+ }
+ if c == 'e' || c == 'E' || c == 'p' || c == 'P' {
+ goto caseep
+ }
+ if c == 'i' {
+ goto casei
+ }
+ goto ncu
+ }
+ }
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ if c == 'x' || c == 'X' {
+ for {
+ cp.WriteByte(byte(c))
+ c = getc()
+ if yy_isdigit(c) {
+ continue
+ }
+ if c >= 'a' && c <= 'f' {
+ continue
+ }
+ if c >= 'A' && c <= 'F' {
+ continue
+ }
+ if lexbuf.Len() == 2 {
+ Yyerror("malformed hex constant")
+ }
+ if c == 'p' {
+ goto caseep
+ }
+ goto ncu
+ }
+ }
+
+ if c == 'p' { // 0p begins floating point zero
+ goto caseep
+ }
+
+ c1 = 0
+ for {
+ if !yy_isdigit(c) {
+ break
+ }
+ if c < '0' || c > '7' {
+ c1 = 1 // not octal
+ }
+ cp.WriteByte(byte(c))
+ c = getc()
+ }
+
+ if c == '.' {
+ goto casedot
+ }
+ if c == 'e' || c == 'E' {
+ goto caseep
+ }
+ if c == 'i' {
+ goto casei
+ }
+ if c1 != 0 {
+ Yyerror("malformed octal constant")
+ }
+ goto ncu
}
switch c {
yylval.sym = s
return int32(s.Lexical)
-tnum:
- cp = &lexbuf
- cp.Reset()
- if c != '0' {
- for {
- cp.WriteByte(byte(c))
- c = getc()
- if yy_isdigit(c) {
- continue
- }
- goto dc
- }
- }
-
- cp.WriteByte(byte(c))
- c = getc()
- if c == 'x' || c == 'X' {
- for {
- cp.WriteByte(byte(c))
- c = getc()
- if yy_isdigit(c) {
- continue
- }
- if c >= 'a' && c <= 'f' {
- continue
- }
- if c >= 'A' && c <= 'F' {
- continue
- }
- if lexbuf.Len() == 2 {
- Yyerror("malformed hex constant")
- }
- if c == 'p' {
- goto caseep
- }
- goto ncu
- }
- }
-
- if c == 'p' { // 0p begins floating point zero
- goto caseep
- }
-
- c1 = 0
- for {
- if !yy_isdigit(c) {
- break
- }
- if c < '0' || c > '7' {
- c1 = 1 // not octal
- }
- cp.WriteByte(byte(c))
- c = getc()
- }
-
- if c == '.' {
- goto casedot
- }
- if c == 'e' || c == 'E' {
- goto caseep
- }
- if c == 'i' {
- goto casei
- }
- if c1 != 0 {
- Yyerror("malformed octal constant")
- }
- goto ncu
-
-dc:
- if c == '.' {
- goto casedot
- }
- if c == 'e' || c == 'E' || c == 'p' || c == 'P' {
- goto caseep
- }
- if c == 'i' {
- goto casei
- }
-
ncu:
cp = nil
ungetc(c)
*/
func getlinepragma() int {
var cmd, verb, name string
- var n int
- var cp *bytes.Buffer
- var linep int
c := int(getr())
if c == 'g' {
- goto go_
+ cp := &lexbuf
+ cp.Reset()
+ cp.WriteByte('g') // already read
+ for {
+ c = int(getr())
+ if c == EOF || c >= utf8.RuneSelf {
+ return c
+ }
+ if c == '\n' {
+ break
+ }
+ cp.WriteByte(byte(c))
+ }
+
+ cp = nil
+
+ if strings.HasPrefix(lexbuf.String(), "go:cgo_") {
+ pragcgo(lexbuf.String())
+ }
+
+ cmd = lexbuf.String()
+ verb = cmd
+ if i := strings.Index(verb, " "); i >= 0 {
+ verb = verb[:i]
+ }
+
+ if verb == "go:linkname" {
+ if imported_unsafe == 0 {
+ Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
+ }
+ f := strings.Fields(cmd)
+ if len(f) != 3 {
+ Yyerror("usage: //go:linkname localname linkname")
+ return c
+ }
+
+ Lookup(f[1]).Linkname = f[2]
+ return c
+ }
+
+ if verb == "go:nointerface" && obj.Fieldtrack_enabled != 0 {
+ nointerface = true
+ return c
+ }
+
+ if verb == "go:noescape" {
+ noescape = true
+ return c
+ }
+
+ if verb == "go:nosplit" {
+ nosplit = true
+ return c
+ }
+
+ if verb == "go:nowritebarrier" {
+ if compiling_runtime == 0 {
+ Yyerror("//go:nowritebarrier only allowed in runtime")
+ }
+ nowritebarrier = true
+ return c
+ }
+ return c
}
if c != 'l' {
- goto out
+ return c
}
for i := 1; i < 5; i++ {
c = int(getr())
if c != int("line "[i]) {
- goto out
+ return c
}
}
- cp = &lexbuf
+ cp := &lexbuf
cp.Reset()
- linep = 0
+ linep := 0
for {
c = int(getr())
if c == EOF {
- goto out
+ return c
}
if c == '\n' {
break
cp = nil
if linep == 0 {
- goto out
+ return c
}
- n = 0
+ n := 0
for _, c := range lexbuf.String()[linep:] {
if c < '0' || c > '9' {
goto out
}
if n <= 0 {
- goto out
+ return c
}
// try to avoid allocating file name over and over
for h := Ctxt.Hist; h != nil; h = h.Link {
if h.Name != "" && h.Name == name {
linehist(h.Name, int32(n), 0)
- goto out
+ return c
}
}
linehist(name, int32(n), 0)
- goto out
-
-go_:
- cp = &lexbuf
- cp.Reset()
- cp.WriteByte('g') // already read
- for {
- c = int(getr())
- if c == EOF || c >= utf8.RuneSelf {
- goto out
- }
- if c == '\n' {
- break
- }
- cp.WriteByte(byte(c))
- }
-
- cp = nil
-
- if strings.HasPrefix(lexbuf.String(), "go:cgo_") {
- pragcgo(lexbuf.String())
- }
-
- cmd = lexbuf.String()
- verb = cmd
- if i := strings.Index(verb, " "); i >= 0 {
- verb = verb[:i]
- }
-
- if verb == "go:linkname" {
- if imported_unsafe == 0 {
- Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
- }
- f := strings.Fields(cmd)
- if len(f) != 3 {
- Yyerror("usage: //go:linkname localname linkname")
- goto out
- }
-
- Lookup(f[1]).Linkname = f[2]
- goto out
- }
-
- if verb == "go:nointerface" && obj.Fieldtrack_enabled != 0 {
- nointerface = true
- goto out
- }
-
- if verb == "go:noescape" {
- noescape = true
- goto out
- }
-
- if verb == "go:nosplit" {
- nosplit = true
- goto out
- }
-
- if verb == "go:nowritebarrier" {
- if compiling_runtime == 0 {
- Yyerror("//go:nowritebarrier only allowed in runtime")
- }
- nowritebarrier = true
- goto out
- }
+ return c
out:
return c
var p string
p, ok = getquoted(&q)
if !ok {
- goto err1
+ Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
+ return
}
pragcgobuf += fmt.Sprintf("cgo_dynamic_linker %v\n", plan9quote(p))
- goto out
+ return
- err1:
- Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
- goto out
}
if verb == "dynexport" {
}
if !more(&q) {
pragcgobuf += fmt.Sprintf("%s %v\n", verb, plan9quote(local))
- goto out
+ return
}
remote = getimpsym(&q)
goto err2
}
pragcgobuf += fmt.Sprintf("%s %v %v\n", verb, plan9quote(local), plan9quote(remote))
- goto out
+ return
err2:
Yyerror("usage: //go:%s local [remote]", verb)
- goto out
+ return
}
if verb == "cgo_import_dynamic" || verb == "dynimport" {
}
if !more(&q) {
pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v\n", plan9quote(local))
- goto out
+ return
}
remote = getimpsym(&q)
}
if !more(&q) {
pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v\n", plan9quote(local), plan9quote(remote))
- goto out
+ return
}
p, ok = getquoted(&q)
goto err3
}
pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v %v\n", plan9quote(local), plan9quote(remote), plan9quote(p))
- goto out
+ return
err3:
Yyerror("usage: //go:cgo_import_dynamic local [remote [\"library\"]]")
- goto out
+ return
}
if verb == "cgo_import_static" {
local := getimpsym(&q)
if local == "" || more(&q) {
- goto err4
+ Yyerror("usage: //go:cgo_import_static local")
+ return
}
pragcgobuf += fmt.Sprintf("cgo_import_static %v\n", plan9quote(local))
- goto out
+ return
- err4:
- Yyerror("usage: //go:cgo_import_static local")
- goto out
}
if verb == "cgo_ldflag" {
var p string
p, ok = getquoted(&q)
if !ok {
- goto err5
+ Yyerror("usage: //go:cgo_ldflag \"arg\"")
+ return
}
pragcgobuf += fmt.Sprintf("cgo_ldflag %v\n", plan9quote(p))
- goto out
+ return
- err5:
- Yyerror("usage: //go:cgo_ldflag \"arg\"")
- goto out
}
-
-out:
}
type yy struct{}
u := 0
c = int(getr())
- var l int64
var i int
switch c {
case 'x':
'6',
'7':
*escflg = 1 // it's a byte
- goto oct
+ l := int64(c) - '0'
+ for i := 2; i > 0; i-- {
+ c = getc()
+ if c >= '0' && c <= '7' {
+ l = l*8 + int64(c) - '0'
+ continue
+ }
+
+ Yyerror("non-octal character in escape sequence: %c", c)
+ ungetc(c)
+ }
+
+ if l > 255 {
+ Yyerror("octal escape value > 255: %d", l)
+ }
+
+ *val = l
+ return false
case 'a':
c = '\a'
return false
hex:
- l = 0
+ l := int64(0)
for ; i > 0; i-- {
c = getc()
if c >= '0' && c <= '9' {
l = utf8.RuneError
}
- *val = l
- return false
-
-oct:
- l = int64(c) - '0'
- for i := 2; i > 0; i-- {
- c = getc()
- if c >= '0' && c <= '7' {
- l = l*8 + int64(c) - '0'
- continue
- }
-
- Yyerror("non-octal character in escape sequence: %c", c)
- ungetc(c)
- }
-
- if l > 255 {
- Yyerror("octal escape value > 255: %d", l)
- }
-
*val = l
return false
}
//
func mpatofix(a *Mpint, as string) {
var c int
- var s0 string
s := as
f := 0
fallthrough
case '0':
- goto oct
- }
+ var c int
+ c, s = intstarstringplusplus(s)
+ if c == 'x' || c == 'X' {
+ s0 := s
+ var c int
+ c, _ = intstarstringplusplus(s)
+ for c != 0 {
+ if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
+ s = s[1:]
+ c, _ = intstarstringplusplus(s)
+ continue
+ }
- for c != 0 {
- if c >= '0' && c <= '9' {
- mpmulcfix(a, 10)
- mpaddcfix(a, int64(c)-'0')
- c, s = intstarstringplusplus(s)
- continue
- }
+ Yyerror("malformed hex constant: %s", as)
+ goto bad
+ }
- Yyerror("malformed decimal constant: %s", as)
- goto bad
- }
+ mphextofix(a, s0)
+ if a.Ovf != 0 {
+ Yyerror("constant too large: %s", as)
+ goto bad
+ }
+ goto out
+ }
+ for c != 0 {
+ if c >= '0' && c <= '7' {
+ mpmulcfix(a, 8)
+ mpaddcfix(a, int64(c)-'0')
+ c, s = intstarstringplusplus(s)
+ continue
+ }
- goto out
+ Yyerror("malformed octal constant: %s", as)
+ goto bad
+ }
-oct:
- c, s = intstarstringplusplus(s)
- if c == 'x' || c == 'X' {
- goto hex
+ goto out
}
+
for c != 0 {
- if c >= '0' && c <= '7' {
- mpmulcfix(a, 8)
+ if c >= '0' && c <= '9' {
+ mpmulcfix(a, 10)
mpaddcfix(a, int64(c)-'0')
c, s = intstarstringplusplus(s)
continue
}
- Yyerror("malformed octal constant: %s", as)
+ Yyerror("malformed decimal constant: %s", as)
goto bad
}
goto out
-hex:
- s0 = s
- c, _ = intstarstringplusplus(s)
- for c != 0 {
- if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
- s = s[1:]
- c, _ = intstarstringplusplus(s)
- continue
- }
-
- Yyerror("malformed hex constant: %s", as)
- goto bad
- }
-
- mphextofix(a, s0)
- if a.Ovf != 0 {
- Yyerror("constant too large: %s", as)
- goto bad
- }
-
out:
if f != 0 {
mpnegfix(a)
}
c := 0
- var x int
if a.Neg != b.Neg {
- goto sub
+ // perform a-b
+ switch mpcmp(a, b) {
+ case 0:
+ Mpmovecfix(a, 0)
+
+ case 1:
+ var x int
+ for i := 0; i < Mpprec; i++ {
+ x = a.A[i] - b.A[i] - c
+ c = 0
+ if x < 0 {
+ x += Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+
+ case -1:
+ a.Neg ^= 1
+ var x int
+ for i := 0; i < Mpprec; i++ {
+ x = b.A[i] - a.A[i] - c
+ c = 0
+ if x < 0 {
+ x += Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+ }
+ return
}
// perform a+b
+ var x int
for i := 0; i < Mpprec; i++ {
x = a.A[i] + b.A[i] + c
c = 0
}
return
-
- // perform a-b
-sub:
- switch mpcmp(a, b) {
- case 0:
- Mpmovecfix(a, 0)
-
- case 1:
- var x int
- for i := 0; i < Mpprec; i++ {
- x = a.A[i] - b.A[i] - c
- c = 0
- if x < 0 {
- x += Mpbase
- c = 1
- }
-
- a.A[i] = x
- }
-
- case -1:
- a.Neg ^= 1
- var x int
- for i := 0; i < Mpprec; i++ {
- x = b.A[i] - a.A[i] - c
- c = 0
- if x < 0 {
- x += Mpbase
- c = 1
- }
-
- a.A[i] = x
- }
- }
}
func mpmulfixfix(a *Mpint, b *Mpint) {
PPARAMOUT:
pos, ok := to.Node.(*Node).Opt.(int32) // index in vars
if !ok {
- goto Next1
+ return
}
if pos >= int32(len(vars)) || vars[pos] != to.Node {
Fatal("bad bookkeeping in liveness %v %d", Nconv(to.Node.(*Node), 0), pos)
}
}
}
-
-Next1:
}
// Constructs a new liveness structure used to hold the global state of the
}
func mkvar(f *Flow, a *obj.Addr) Bits {
- var v *Var
- var i int
- var n int
- var et int
- var flag int
- var w int64
- var o int64
- var bit Bits
- var node *Node
- var r *Reg
-
/*
* mark registers used
*/
if a.Type == obj.TYPE_NONE {
- goto none
+ return zbits
}
- r = f.Data.(*Reg)
+ r := f.Data.(*Reg)
r.use1.b[0] |= Thearch.Doregbits(int(a.Index)) // TODO: Use RtoB
+ var n int
switch a.Type {
default:
regu := Thearch.Doregbits(int(a.Reg)) | Thearch.RtoB(int(a.Reg)) // TODO: Use RtoB
if regu == 0 {
- goto none
+ return zbits
}
bit := zbits
bit.b[0] = regu
setaddrs(bit)
a.Type = obj.TYPE_ADDR
Ostats.Naddr++
- goto none
+ return zbits
memcase:
fallthrough
*/
switch a.Name {
default:
- goto none
+ return zbits
case obj.NAME_EXTERN,
obj.NAME_STATIC,
}
}
+ var node *Node
node, _ = a.Node.(*Node)
if node == nil || node.Op != ONAME || node.Orig == nil {
- goto none
+ return zbits
}
node = node.Orig
if node.Orig != node {
Fatal("%v: bad node", Ctxt.Dconv(a))
}
if node.Sym == nil || node.Sym.Name[0] == '.' {
- goto none
+ return zbits
}
- et = int(a.Etype)
- o = a.Offset
- w = a.Width
+ et := int(a.Etype)
+ o := a.Offset
+ w := a.Width
if w < 0 {
Fatal("bad width %d for %v", w, Ctxt.Dconv(a))
}
- flag = 0
+ flag := 0
+ var v *Var
for i := 0; i < nvar; i++ {
v = &var_[i:][0]
if v.node == node && int(v.name) == n {
switch et {
case 0,
TFUNC:
- goto none
+ return zbits
}
if nvar >= NVAR {
}
}
- goto none
+ return zbits
}
- i = nvar
+ i := nvar
nvar++
v = &var_[i:][0]
v.id = i
node.Opt = v
- bit = blsh(uint(i))
+ bit := blsh(uint(i))
if n == obj.NAME_EXTERN || n == obj.NAME_STATIC {
for z := 0; z < BITS; z++ {
externs.b[z] |= bit.b[z]
Ostats.Nvar++
return bit
-
-none:
- return zbits
}
func prop(f *Flow, ref Bits, cal Bits) {
func simplename(n *Node) bool {
if n.Op != ONAME {
- goto no
+ return false
}
if n.Addable == 0 {
- goto no
+ return false
}
if n.Class&PHEAP != 0 {
- goto no
+ return false
}
if n.Class == PPARAMREF {
- goto no
+ return false
}
return true
-
-no:
- return false
}
func litas(l *Node, r *Node, init **NodeList) {
}
func oaslit(n *Node, init **NodeList) bool {
- var ctxt int
-
if n.Left == nil || n.Right == nil {
- goto no
+ // not a special composit literal assignment
+ return false
}
if n.Left.Type == nil || n.Right.Type == nil {
- goto no
+ // not a special composit literal assignment
+ return false
}
if !simplename(n.Left) {
- goto no
+ // not a special composit literal assignment
+ return false
}
if !Eqtype(n.Left.Type, n.Right.Type) {
- goto no
+ // not a special composit literal assignment
+ return false
}
// context is init() function.
// implies generated data executed
// exactly once and not subject to races.
- ctxt = 0
+ ctxt := 0
// if(n->dodata == 1)
// ctxt = 1;
switch n.Right.Op {
default:
- goto no
+ // not a special composit literal assignment
+ return false
case OSTRUCTLIT,
OARRAYLIT,
OMAPLIT:
if vmatch1(n.Left, n.Right) {
- goto no
+ // not a special composit literal assignment
+ return false
}
anylit(ctxt, n.Right, n.Left, init)
}
n.Op = OEMPTY
return true
-
- // not a special composit literal assignment
-no:
- return false
}
func getlit(lit *Node) int {
func stataddr(nam *Node, n *Node) bool {
if n == nil {
- goto no
+ return false
}
switch n.Op {
return true
}
-no:
return false
}
var nr *Node
var nl *Node
var nam Node
- var nod1 Node
if n.Dodata == 0 {
goto no
if nam.Class != PEXTERN {
goto no
}
- goto yes
+ return true
}
if nr.Type == nil || !Eqtype(nl.Type, nr.Type) {
case OSLICEARR:
if nr.Right.Op == OKEY && nr.Right.Left == nil && nr.Right.Right == nil {
nr = nr.Left
- goto slice
+ gused(nil) // in case the data is the dest of a goto
+ nl := nr
+ if nr == nil || nr.Op != OADDR {
+ goto no
+ }
+ nr = nr.Left
+ if nr == nil || nr.Op != ONAME {
+ goto no
+ }
+
+ // nr is the array being converted to a slice
+ if nr.Type == nil || nr.Type.Etype != TARRAY || nr.Type.Bound < 0 {
+ goto no
+ }
+
+ nam.Xoffset += int64(Array_array)
+ gdata(&nam, nl, int(Types[Tptr].Width))
+
+ nam.Xoffset += int64(Array_nel) - int64(Array_array)
+ var nod1 Node
+ Nodconst(&nod1, Types[TINT], nr.Type.Bound)
+ gdata(&nam, &nod1, Widthint)
+
+ nam.Xoffset += int64(Array_cap) - int64(Array_nel)
+ gdata(&nam, &nod1, Widthint)
+
+ return true
}
goto no
gdatastring(&nam, nr.Val.U.Sval)
}
-yes:
return true
-slice:
- gused(nil) // in case the data is the dest of a goto
- nl = nr
- if nr == nil || nr.Op != OADDR {
- goto no
- }
- nr = nr.Left
- if nr == nil || nr.Op != ONAME {
- goto no
- }
-
- // nr is the array being converted to a slice
- if nr.Type == nil || nr.Type.Etype != TARRAY || nr.Type.Bound < 0 {
- goto no
- }
-
- nam.Xoffset += int64(Array_array)
- gdata(&nam, nl, int(Types[Tptr].Width))
-
- nam.Xoffset += int64(Array_nel) - int64(Array_array)
- Nodconst(&nod1, Types[TINT], nr.Type.Bound)
- gdata(&nam, &nod1, Widthint)
-
- nam.Xoffset += int64(Array_cap) - int64(Array_nel)
- gdata(&nam, &nod1, Widthint)
-
- goto yes
-
no:
if n.Dodata == 2 {
Dump("\ngen_as_init", n)
Fatal("struct/interface missing field: %v %v", Tconv(t1, 0), Tconv(t2, 0))
}
if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, &l) || !eqnote(t1.Note, t2.Note) {
- goto no
+ return false
}
}
if t1 == nil && t2 == nil {
- goto yes
+ return true
}
- goto no
+ return false
// Loop over structs: receiver, in, out.
case TFUNC:
Fatal("func struct missing field: %v %v", Tconv(ta, 0), Tconv(tb, 0))
}
if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, &l) {
- goto no
+ return false
}
}
if ta != nil || tb != nil {
- goto no
+ return false
}
}
if t1 == nil && t2 == nil {
- goto yes
+ return true
}
- goto no
+ return false
case TARRAY:
if t1.Bound != t2.Bound {
- goto no
+ return false
}
case TCHAN:
if t1.Chan != t2.Chan {
- goto no
+ return false
}
}
if eqtype1(t1.Down, t2.Down, &l) && eqtype1(t1.Type, t2.Type, &l) {
- goto yes
+ return true
}
- goto no
-
-yes:
- return true
-
-no:
return false
}
}
func subtype(stp **Type, t *Type, d int) bool {
- var st *Type
-
loop:
- st = *stp
+ st := *stp
if st == nil {
return false
}
t = n.Type
if t == nil {
- goto rnil
+ return nil
}
if t.Etype != TFIELD {
bad:
Fatal("structfirst: not struct %v", Tconv(n, 0))
-rnil:
return nil
}
n := s.T
t := n.Down
if t == nil {
- goto rnil
+ return nil
}
if t.Etype != TFIELD {
- goto bad
+ Fatal("structnext: not struct %v", Tconv(n, 0))
+
+ return nil
}
s.T = t
return t
-
-bad:
- Fatal("structnext: not struct %v", Tconv(n, 0))
-
-rnil:
- return nil
}
/*
// will give shortest unique addressing.
// modify the tree with missing type names.
func adddot(n *Node) *Node {
- var s *Sym
- var c int
- var d int
-
typecheck(&n.Left, Etype|Erv)
n.Diag |= n.Left.Diag
t := n.Left.Type
if t == nil {
- goto ret
+ return n
}
if n.Left.Op == OTYPE {
- goto ret
+ return n
}
if n.Right.Op != ONAME {
- goto ret
+ return n
}
- s = n.Right.Sym
+ s := n.Right.Sym
if s == nil {
- goto ret
+ return n
}
- for d = 0; d < len(dotlist); d++ {
+ var c int
+ for d := 0; d < len(dotlist); d++ {
c = adddot1(s, t, d, nil, 0)
if c > 0 {
- goto out
- }
- }
-
- goto ret
+ if c > 1 {
+ Yyerror("ambiguous selector %v", Nconv(n, 0))
+ n.Left = nil
+ return n
+ }
-out:
- if c > 1 {
- Yyerror("ambiguous selector %v", Nconv(n, 0))
- n.Left = nil
- return n
- }
+ // rebuild elided dots
+ for c := d - 1; c >= 0; c-- {
+ if n.Left.Type != nil && Isptr[n.Left.Type.Etype] != 0 {
+ n.Left.Implicit = 1
+ }
+ n.Left = Nod(ODOT, n.Left, newname(dotlist[c].field.Sym))
+ }
- // rebuild elided dots
- for c := d - 1; c >= 0; c-- {
- if n.Left.Type != nil && Isptr[n.Left.Type.Etype] != 0 {
- n.Left.Implicit = 1
+ return n
}
- n.Left = Nod(ODOT, n.Left, newname(dotlist[c].field.Sym))
}
-ret:
return n
}
* 1000+ if it is a -(power of 2)
*/
func powtwo(n *Node) int {
- var v uint64
- var b uint64
-
if n == nil || n.Op != OLITERAL || n.Type == nil {
- goto no
+ return -1
}
if Isint[n.Type.Etype] == 0 {
- goto no
+ return -1
}
- v = uint64(Mpgetfix(n.Val.U.Xval))
- b = 1
+ v := uint64(Mpgetfix(n.Val.U.Xval))
+ b := uint64(1)
for i := 0; i < 64; i++ {
if b == v {
return i
}
if Issigned[n.Type.Etype] == 0 {
- goto no
+ return -1
}
v = -v
b = b << 1
}
-no:
return -1
}
for i := 0; i < len(s); i++ {
c := s[i]
if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
- goto escape
+ var buf bytes.Buffer
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+ fmt.Fprintf(&buf, "%%%02x", c)
+ continue
+ }
+ buf.WriteByte(c)
+ }
+ return buf.String()
}
}
return s
-
-escape:
- var buf bytes.Buffer
- for i := 0; i < len(s); i++ {
- c := s[i]
- if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
- fmt.Fprintf(&buf, "%%%02x", c)
- continue
- }
- buf.WriteByte(c)
- }
- return buf.String()
}
func mkpkg(path_ *Strlit) *Pkg {
arith:
if op == OLSH || op == ORSH {
- goto shift
+ defaultlit(&r, Types[TUINT])
+ n.Right = r
+ t := r.Type
+ if Isint[t.Etype] == 0 || Issigned[t.Etype] != 0 {
+ Yyerror("invalid operation: %v (shift count type %v, must be unsigned integer)", Nconv(n, 0), Tconv(r.Type, 0))
+ goto error
+ }
+
+ t = l.Type
+ if t != nil && t.Etype != TIDEAL && Isint[t.Etype] == 0 {
+ Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ // no defaultlit for left
+ // the outer context gives the type
+ n.Type = l.Type
+
+ goto ret
}
// ideal mixed with non-ideal
n.Type = t
goto ret
-shift:
- defaultlit(&r, Types[TUINT])
- n.Right = r
- t = r.Type
- if Isint[t.Etype] == 0 || Issigned[t.Etype] != 0 {
- Yyerror("invalid operation: %v (shift count type %v, must be unsigned integer)", Nconv(n, 0), Tconv(r.Type, 0))
- goto error
- }
-
- t = l.Type
- if t != nil && t.Etype != TIDEAL && Isint[t.Etype] == 0 {
- Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
- goto error
- }
-
- // no defaultlit for left
- // the outer context gives the type
- n.Type = l.Type
-
- goto ret
-
doconv:
ok |= Erv
saveorignode(n)
* rewrite with a constant
*/
func unsafenmagic(nn *Node) *Node {
- var r *Node
- var s *Sym
- var v int64
-
fn := nn.Left
args := nn.List
if safemode != 0 || fn == nil || fn.Op != ONAME {
- goto no
+ return nil
}
- s = fn.Sym
+ s := fn.Sym
if s == nil {
- goto no
+ return nil
}
if s.Pkg != unsafepkg {
- goto no
+ return nil
}
if args == nil {
Yyerror("missing argument for %v", Sconv(s, 0))
- goto no
+ return nil
}
- r = args.N
+ r := args.N
+ var v int64
if s.Name == "Sizeof" {
typecheck(&r, Erv)
defaultlit(&r, nil)
goto yes
}
-no:
return nil
bad:
func plan9quote(s string) string {
if s == "" {
- goto needquote
+ return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
for i := 0; i < len(s); i++ {
if s[i] <= ' ' || s[i] == '\'' {
- goto needquote
+ return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
}
return s
-
-needquote:
- return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
// simulation of int(*s++) in C
if sl >= 0 {
sr := int(Mpgetfix(r.Right.Val.U.Xval))
if sr >= 0 && sl+sr == w {
- goto yes
+ // Rewrite left shift half to left rotate.
+ if l.Op == OLSH {
+ n = l
+ } else {
+ n = r
+ }
+ n.Op = OLROT
+
+ // Remove rotate 0 and rotate w.
+ s := int(Mpgetfix(n.Right.Val.U.Xval))
+
+ if s == 0 || s == w {
+ n = n.Left
+ }
+
+ *np = n
+ return
}
}
return
// TODO: Could allow s and 32-s if s is bounded (maybe s&31 and 32-s&31).
return
-
- // Rewrite left shift half to left rotate.
-yes:
- if l.Op == OLSH {
- n = l
- } else {
- n = r
- }
- n.Op = OLROT
-
- // Remove rotate 0 and rotate w.
- s := int(Mpgetfix(n.Right.Val.U.Xval))
-
- if s == 0 || s == w {
- n = n.Left
- }
-
- *np = n
- return
}
/*
return
}
- var n1 *Node
- var m Magic
- var n2 *Node
if pow < 0 {
- goto divbymul
+ // try to do division by multiply by (2^w)/d
+ // see hacker's delight chapter 10
+ // TODO: support 64-bit magic multiply here.
+ var m Magic
+ m.W = w
+
+ if Issigned[nl.Type.Etype] != 0 {
+ m.Sd = Mpgetfix(nr.Val.U.Xval)
+ Smagic(&m)
+ } else {
+ m.Ud = uint64(Mpgetfix(nr.Val.U.Xval))
+ Umagic(&m)
+ }
+
+ if m.Bad != 0 {
+ return
+ }
+
+ // We have a quick division method so use it
+ // for modulo too.
+ if n.Op == OMOD {
+ // rewrite as A%B = A - (A/B*B).
+ n1 := Nod(ODIV, nl, nr)
+
+ n2 := Nod(OMUL, n1, nr)
+ n = Nod(OSUB, nl, n2)
+ goto ret
+ }
+
+ switch Simtype[nl.Type.Etype] {
+ default:
+ return
+
+ // n1 = nl * magic >> w (HMUL)
+ case TUINT8,
+ TUINT16,
+ TUINT32:
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, nl.Type, int64(m.Um))
+ n1 := Nod(OMUL, nl, nc)
+ typecheck(&n1, Erv)
+ n1.Op = OHMUL
+ if m.Ua != 0 {
+ // Select a Go type with (at least) twice the width.
+ var twide *Type
+ switch Simtype[nl.Type.Etype] {
+ default:
+ return
+
+ case TUINT8,
+ TUINT16:
+ twide = Types[TUINT32]
+
+ case TUINT32:
+ twide = Types[TUINT64]
+
+ case TINT8,
+ TINT16:
+ twide = Types[TINT32]
+
+ case TINT32:
+ twide = Types[TINT64]
+ }
+
+ // add numerator (might overflow).
+ // n2 = (n1 + nl)
+ n2 := Nod(OADD, conv(n1, twide), conv(nl, twide))
+
+ // shift by m.s
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(m.S))
+ n = conv(Nod(ORSH, n2, nc), nl.Type)
+ } else {
+ // n = n1 >> m.s
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(m.S))
+ n = Nod(ORSH, n1, nc)
+ }
+
+ // n1 = nl * magic >> w
+ case TINT8,
+ TINT16,
+ TINT32:
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, nl.Type, m.Sm)
+ n1 := Nod(OMUL, nl, nc)
+ typecheck(&n1, Erv)
+ n1.Op = OHMUL
+ if m.Sm < 0 {
+ // add the numerator.
+ n1 = Nod(OADD, n1, nl)
+ }
+
+ // shift by m.s
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(m.S))
+ n2 := conv(Nod(ORSH, n1, nc), nl.Type)
+
+ // add 1 iff n1 is negative.
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(w)-1)
+ n3 := Nod(ORSH, nl, nc) // n4 = -1 iff n1 is negative.
+ n = Nod(OSUB, n2, n3)
+
+ // apply sign.
+ if m.Sd < 0 {
+ n = Nod(OMINUS, n, nil)
+ }
+ }
+
+ goto ret
}
switch pow {
goto ret
- // try to do division by multiply by (2^w)/d
- // see hacker's delight chapter 10
- // TODO: support 64-bit magic multiply here.
-divbymul:
- m.W = w
-
- if Issigned[nl.Type.Etype] != 0 {
- m.Sd = Mpgetfix(nr.Val.U.Xval)
- Smagic(&m)
- } else {
- m.Ud = uint64(Mpgetfix(nr.Val.U.Xval))
- Umagic(&m)
- }
-
- if m.Bad != 0 {
- return
- }
-
- // We have a quick division method so use it
- // for modulo too.
- if n.Op == OMOD {
- goto longmod
- }
-
- switch Simtype[nl.Type.Etype] {
- default:
- return
-
- // n1 = nl * magic >> w (HMUL)
- case TUINT8,
- TUINT16,
- TUINT32:
- nc := Nod(OXXX, nil, nil)
-
- Nodconst(nc, nl.Type, int64(m.Um))
- n1 := Nod(OMUL, nl, nc)
- typecheck(&n1, Erv)
- n1.Op = OHMUL
- if m.Ua != 0 {
- // Select a Go type with (at least) twice the width.
- var twide *Type
- switch Simtype[nl.Type.Etype] {
- default:
- return
-
- case TUINT8,
- TUINT16:
- twide = Types[TUINT32]
-
- case TUINT32:
- twide = Types[TUINT64]
-
- case TINT8,
- TINT16:
- twide = Types[TINT32]
-
- case TINT32:
- twide = Types[TINT64]
- }
-
- // add numerator (might overflow).
- // n2 = (n1 + nl)
- n2 := Nod(OADD, conv(n1, twide), conv(nl, twide))
-
- // shift by m.s
- nc := Nod(OXXX, nil, nil)
-
- Nodconst(nc, Types[TUINT], int64(m.S))
- n = conv(Nod(ORSH, n2, nc), nl.Type)
- } else {
- // n = n1 >> m.s
- nc := Nod(OXXX, nil, nil)
-
- Nodconst(nc, Types[TUINT], int64(m.S))
- n = Nod(ORSH, n1, nc)
- }
-
- // n1 = nl * magic >> w
- case TINT8,
- TINT16,
- TINT32:
- nc := Nod(OXXX, nil, nil)
-
- Nodconst(nc, nl.Type, m.Sm)
- n1 := Nod(OMUL, nl, nc)
- typecheck(&n1, Erv)
- n1.Op = OHMUL
- if m.Sm < 0 {
- // add the numerator.
- n1 = Nod(OADD, n1, nl)
- }
-
- // shift by m.s
- nc = Nod(OXXX, nil, nil)
-
- Nodconst(nc, Types[TUINT], int64(m.S))
- n2 := conv(Nod(ORSH, n1, nc), nl.Type)
-
- // add 1 iff n1 is negative.
- nc = Nod(OXXX, nil, nil)
-
- Nodconst(nc, Types[TUINT], int64(w)-1)
- n3 := Nod(ORSH, nl, nc) // n4 = -1 iff n1 is negative.
- n = Nod(OSUB, n2, n3)
-
- // apply sign.
- if m.Sd < 0 {
- n = Nod(OMINUS, n, nil)
- }
- }
-
- goto ret
-
- // rewrite as A%B = A - (A/B*B).
-longmod:
- n1 = Nod(ODIV, nl, nr)
-
- n2 = Nod(OMUL, n1, nr)
- n = Nod(OSUB, nl, n2)
- goto ret
-
ret:
typecheck(&n, Erv)
walkexpr(&n, init)
}
func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
- var off int64
-
- off = s.Size
+ off := s.Size
setuintxx(ctxt, s, off, v, int64(wid))
return off
}
}
func Addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
- var i int64
- var r *Reloc
-
if s.Type == 0 {
s.Type = SDATA
}
s.Reachable = true
- i = s.Size
+ i := s.Size
s.Size += int64(ctxt.Arch.Ptrsize)
Symgrow(ctxt, s, s.Size)
- r = Addrel(s)
+ r := Addrel(s)
r.Sym = t
r.Off = int32(i)
r.Siz = uint8(ctxt.Arch.Ptrsize)
}
func Addpcrelplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
- var i int64
- var r *Reloc
-
if s.Type == 0 {
s.Type = SDATA
}
s.Reachable = true
- i = s.Size
+ i := s.Size
s.Size += 4
Symgrow(ctxt, s, s.Size)
- r = Addrel(s)
+ r := Addrel(s)
r.Sym = t
r.Off = int32(i)
r.Add = add
}
func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
- var r *Reloc
-
if s.Type == 0 {
s.Type = SDATA
}
Symgrow(ctxt, s, s.Size)
}
- r = Addrel(s)
+ r := Addrel(s)
r.Sym = t
r.Off = int32(off)
r.Siz = uint8(ctxt.Arch.Ptrsize)
}
func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
- var i int64
- var r *Reloc
-
if s.Type == 0 {
s.Type = SDATA
}
s.Reachable = true
- i = s.Size
+ i := s.Size
s.Size += int64(ctxt.Arch.Ptrsize)
Symgrow(ctxt, s, s.Size)
- r = Addrel(s)
+ r := Addrel(s)
r.Sym = t
r.Off = int32(i)
r.Siz = uint8(ctxt.Arch.Ptrsize)
}
func addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
- var i int64
- var r *Reloc
-
if s.Type == 0 {
s.Type = SDATA
}
s.Reachable = true
- i = s.Size
+ i := s.Size
s.Size += 4
Symgrow(ctxt, s, s.Size)
- r = Addrel(s)
+ r := Addrel(s)
r.Sym = t
r.Off = int32(i)
r.Siz = 4
}
func listsort(l *LSym, cmp func(*LSym, *LSym) int, nextp func(*LSym) **LSym) *LSym {
- var l1 *LSym
- var l2 *LSym
- var le *LSym
-
if l == nil || *nextp(l) == nil {
return l
}
- l1 = l
- l2 = l
+ l1 := l
+ l2 := l
for {
l2 = *nextp(l2)
if l2 == nil {
l2 = *nextp(l2)
}
- le = l
+ le := l
for {
if l1 == nil {
var r *Reloc
var rs *LSym
var i16 int16
- var ri int32
var off int32
var siz int32
var fl int32
var o int64
Ctxt.Cursym = s
- for ri = 0; ri < int32(len(s.R)); ri++ {
+ for ri := int32(0); ri < int32(len(s.R)); ri++ {
r = &s.R[ri]
r.Done = 1
off = r.Off
}
func reloc() {
- var s *LSym
-
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f reloc\n", obj.Cputime())
}
Bflush(&Bso)
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
relocsym(s)
}
- for s = datap; s != nil; s = s.Next {
+ for s := datap; s != nil; s = s.Next {
relocsym(s)
}
}
func dynrelocsym(s *LSym) {
- var ri int
- var r *Reloc
-
if HEADTYPE == Hwindows {
- var rel *LSym
- var targ *LSym
-
- rel = Linklookup(Ctxt, ".rel", 0)
+ rel := Linklookup(Ctxt, ".rel", 0)
if s == rel {
return
}
- for ri = 0; ri < len(s.R); ri++ {
+ var r *Reloc
+ var targ *LSym
+ for ri := 0; ri < len(s.R); ri++ {
r = &s.R[ri]
targ = r.Sym
if targ == nil {
return
}
- for ri = 0; ri < len(s.R); ri++ {
+ var r *Reloc
+ for ri := 0; ri < len(s.R); ri++ {
r = &s.R[ri]
if r.Sym != nil && r.Sym.Type == SDYNIMPORT || r.Type >= 256 {
if r.Sym != nil && !r.Sym.Reachable {
}
func dynreloc() {
- var s *LSym
-
// -d suppresses dynamic loader format, so we may as well not
// compute these sections or mark their symbols as reachable.
if Debug['d'] != 0 && HEADTYPE != Hwindows {
}
Bflush(&Bso)
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
dynrelocsym(s)
}
- for s = datap; s != nil; s = s.Next {
+ for s := datap; s != nil; s = s.Next {
dynrelocsym(s)
}
if Iself {
func blk(start *LSym, addr int64, size int64) {
var sym *LSym
- var eaddr int64
- var p []byte
- var ep []byte
for sym = start; sym != nil; sym = sym.Next {
if sym.Type&SSUB == 0 && sym.Value >= addr {
}
}
- eaddr = addr + size
+ eaddr := addr + size
+ var ep []byte
+ var p []byte
for ; sym != nil; sym = sym.Next {
if sym.Type&SSUB != 0 {
continue
}
func Codeblk(addr int64, size int64) {
- var sym *LSym
- var eaddr int64
- var n int64
- var q []byte
-
if Debug['a'] != 0 {
fmt.Fprintf(&Bso, "codeblk [%#x,%#x) at offset %#x\n", addr, addr+size, Cpos())
}
return
}
+ var sym *LSym
for sym = Ctxt.Textp; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
}
}
- eaddr = addr + size
+ eaddr := addr + size
+ var n int64
+ var q []byte
for ; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
}
func Datblk(addr int64, size int64) {
- var sym *LSym
- var i int64
- var eaddr int64
- var p []byte
- var ep []byte
- var typ string
- var rsname string
- var r *Reloc
-
if Debug['a'] != 0 {
fmt.Fprintf(&Bso, "datblk [%#x,%#x) at offset %#x\n", addr, addr+size, Cpos())
}
return
}
+ var sym *LSym
for sym = datap; sym != nil; sym = sym.Next {
if sym.Value >= addr {
break
}
}
- eaddr = addr + size
+ eaddr := addr + size
+ var ep []byte
+ var i int64
+ var p []byte
+ var r *Reloc
+ var rsname string
+ var typ string
for ; sym != nil; sym = sym.Next {
if sym.Value >= eaddr {
break
}
func addstrdata(name string, value string) {
- var s *LSym
- var sp *LSym
- var p string
- var reachable bool
-
- p = fmt.Sprintf("%s.str", name)
- sp = Linklookup(Ctxt, p, 0)
+ p := fmt.Sprintf("%s.str", name)
+ sp := Linklookup(Ctxt, p, 0)
Addstring(sp, value)
sp.Type = SRODATA
- s = Linklookup(Ctxt, name, 0)
+ s := Linklookup(Ctxt, name, 0)
s.Size = 0
s.Dupok = 1
- reachable = s.Reachable
+ reachable := s.Reachable
Addaddr(Ctxt, s, sp)
adduintxx(Ctxt, s, uint64(len(value)), Thearch.Ptrsize)
}
func Addstring(s *LSym, str string) int64 {
- var n int
- var r int32
-
if s.Type == 0 {
s.Type = SNOPTRDATA
}
s.Reachable = true
- r = int32(s.Size)
- n = len(str) + 1
+ r := int32(s.Size)
+ n := len(str) + 1
if s.Name == ".shstrtab" {
elfsetstring(str, int(r))
}
}
func dosymtype() {
- var s *LSym
-
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if len(s.P) > 0 {
if s.Type == SBSS {
s.Type = SDATA
}
func symalign(s *LSym) int32 {
- var align int32
-
if s.Align != 0 {
return s.Align
}
- align = int32(Thearch.Maxalign)
+ align := int32(Thearch.Maxalign)
for int64(align) > s.Size && align > 1 {
align >>= 1
}
// the list of symbols s; the list stops when s->type exceeds type.
func maxalign(s *LSym, type_ int) int32 {
var align int32
- var max int32
- max = 0
+ max := int32(0)
for ; s != nil && int(s.Type) <= type_; s = s.Next {
align = symalign(s)
if max < align {
// Writes insData block from g->data.
func proggendataflush(g *ProgGen) {
- var i int32
- var s int32
-
if g.datasize == 0 {
return
}
proggenemit(g, obj.InsData)
proggenemit(g, uint8(g.datasize))
- s = (g.datasize + obj.PointersPerByte - 1) / obj.PointersPerByte
- for i = 0; i < s; i++ {
+ s := (g.datasize + obj.PointersPerByte - 1) / obj.PointersPerByte
+ for i := int32(0); i < s; i++ {
proggenemit(g, g.data[i])
}
g.datasize = 0
// Skip v bytes due to alignment, etc.
func proggenskip(g *ProgGen, off int64, v int64) {
- var i int64
-
- for i = off; i < off+v; i++ {
+ for i := off; i < off+v; i++ {
if (i % int64(Thearch.Ptrsize)) == 0 {
proggendata(g, obj.BitsScalar)
}
// This function generates GC pointer info for global variables.
func proggenaddsym(g *ProgGen, s *LSym) {
- var gcprog *LSym
- var mask []byte
- var i int64
- var size int64
-
if s.Size == 0 {
return
}
if (s.Size%int64(Thearch.Ptrsize) != 0) || (g.pos%int64(Thearch.Ptrsize) != 0) {
Diag("proggenaddsym: unaligned conservative symbol %s: size=%d pos=%d", s.Name, s.Size, g.pos)
}
- size = (s.Size + int64(Thearch.Ptrsize) - 1) / int64(Thearch.Ptrsize) * int64(Thearch.Ptrsize)
+ size := (s.Size + int64(Thearch.Ptrsize) - 1) / int64(Thearch.Ptrsize) * int64(Thearch.Ptrsize)
if size < int64(32*Thearch.Ptrsize) {
// Emit small symbols as data.
- for i = 0; i < size/int64(Thearch.Ptrsize); i++ {
+ for i := int64(0); i < size/int64(Thearch.Ptrsize); i++ {
proggendata(g, obj.BitsPointer)
}
} else {
if s.Size < int64(32*Thearch.Ptrsize) {
// Emit small symbols as data.
// This case also handles unaligned and tiny symbols, so tread carefully.
- for i = s.Value; i < s.Value+s.Size; i++ {
+ for i := s.Value; i < s.Value+s.Size; i++ {
if (i % int64(Thearch.Ptrsize)) == 0 {
proggendata(g, obj.BitsScalar)
}
// gc program, copy directly
proggendataflush(g)
- gcprog = decodetype_gcprog(s.Gotype)
- size = decodetype_size(s.Gotype)
+ gcprog := decodetype_gcprog(s.Gotype)
+ size := decodetype_size(s.Gotype)
if (size%int64(Thearch.Ptrsize) != 0) || (g.pos%int64(Thearch.Ptrsize) != 0) {
Diag("proggenaddsym: unaligned gcprog symbol %s: size=%d pos=%d", s.Name, s.Size, g.pos)
}
- for i = 0; i < int64(len(gcprog.P)-1); i++ {
+ for i := int64(0); i < int64(len(gcprog.P)-1); i++ {
proggenemit(g, uint8(gcprog.P[i]))
}
g.pos = s.Value + size
} else {
// gc mask, it's small so emit as data
- mask = decodetype_gcmask(s.Gotype)
+ mask := decodetype_gcmask(s.Gotype)
- size = decodetype_size(s.Gotype)
+ size := decodetype_size(s.Gotype)
if (size%int64(Thearch.Ptrsize) != 0) || (g.pos%int64(Thearch.Ptrsize) != 0) {
Diag("proggenaddsym: unaligned gcmask symbol %s: size=%d pos=%d", s.Name, s.Size, g.pos)
}
- for i = 0; i < size; i += int64(Thearch.Ptrsize) {
+ for i := int64(0); i < size; i += int64(Thearch.Ptrsize) {
proggendata(g, uint8((mask[i/int64(Thearch.Ptrsize)/2]>>uint64((i/int64(Thearch.Ptrsize)%2)*4+2))&obj.BitsMask))
}
g.pos = s.Value + size
}
func growdatsize(datsizep *int64, s *LSym) {
- var datsize int64
-
- datsize = *datsizep
+ datsize := *datsizep
if s.Size < 0 {
Diag("negative size (datsize = %d, s->size = %d)", datsize, s.Size)
}
}
func dodata() {
- var n int32
- var datsize int64
- var sect *Section
- var segro *Segment
- var s *LSym
- var last *LSym
- var l **LSym
- var toc *LSym
- var gcdata *LSym
- var gcbss *LSym
- var gen ProgGen
-
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f dodata\n", obj.Cputime())
}
Bflush(&Bso)
- last = nil
+ last := (*LSym)(nil)
datap = nil
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if !s.Reachable || s.Special != 0 {
continue
}
}
}
- for s = datap; s != nil; s = s.Next {
+ for s := datap; s != nil; s = s.Next {
if int64(len(s.P)) > s.Size {
Diag("%s: initialize bounds (%d < %d)", s.Name, int64(s.Size), len(s.P))
}
dynreloc()
/* some symbols may no longer belong in datap (Mach-O) */
+ var l **LSym
+ var s *LSym
for l = &datap; ; {
s = *l
if s == nil {
}
/* writable ELF sections */
- datsize = 0
+ datsize := int64(0)
+ var sect *Section
for ; s != nil && s.Type < SELFGOT; s = s.Next {
sect = addsection(&Segdata, s.Name, 06)
sect.Align = symalign(s)
/* .got (and .toc on ppc64) */
if s.Type == SELFGOT {
- sect = addsection(&Segdata, ".got", 06)
+ sect := addsection(&Segdata, ".got", 06)
sect.Align = maxalign(s, SELFGOT)
datsize = Rnd(datsize, int64(sect.Align))
sect.Vaddr = uint64(datsize)
+ var toc *LSym
for ; s != nil && s.Type == SELFGOT; s = s.Next {
datsize = aligndatsize(datsize, s)
s.Sect = sect
/* shared library initializer */
if Flag_shared != 0 {
- sect = addsection(&Segdata, ".init_array", 06)
+ sect := addsection(&Segdata, ".init_array", 06)
sect.Align = maxalign(s, SINITARR)
datsize = Rnd(datsize, int64(sect.Align))
sect.Vaddr = uint64(datsize)
sect.Vaddr = uint64(datsize)
Linklookup(Ctxt, "runtime.data", 0).Sect = sect
Linklookup(Ctxt, "runtime.edata", 0).Sect = sect
- gcdata = Linklookup(Ctxt, "runtime.gcdata", 0)
+ gcdata := Linklookup(Ctxt, "runtime.gcdata", 0)
+ var gen ProgGen
proggeninit(&gen, gcdata)
for ; s != nil && s.Type < SBSS; s = s.Next {
if s.Type == SINITARR {
sect.Vaddr = uint64(datsize)
Linklookup(Ctxt, "runtime.bss", 0).Sect = sect
Linklookup(Ctxt, "runtime.ebss", 0).Sect = sect
- gcbss = Linklookup(Ctxt, "runtime.gcbss", 0)
+ gcbss := Linklookup(Ctxt, "runtime.gcbss", 0)
proggeninit(&gen, gcbss)
for ; s != nil && s.Type < SNOPTRBSS; s = s.Next {
s.Sect = sect
}
if Iself && Linkmode == LinkExternal && s != nil && s.Type == STLSBSS && HEADTYPE != Hopenbsd {
- sect = addsection(&Segdata, ".tbss", 06)
+ sect := addsection(&Segdata, ".tbss", 06)
sect.Align = int32(Thearch.Ptrsize)
sect.Vaddr = 0
datsize = 0
* since it's not our decision; that code expects the sections in
* segtext.
*/
+ var segro *Segment
if Iself && Linkmode == LinkInternal {
segro = &Segrodata
} else {
}
/* number the sections */
- n = 1
+ n := int32(1)
- for sect = Segtext.Sect; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
sect.Extnum = int16(n)
n++
}
- for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
sect.Extnum = int16(n)
n++
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
sect.Extnum = int16(n)
n++
}
// assign addresses to text
func textaddress() {
- var va uint64
- var sect *Section
- var sym *LSym
var sub *LSym
addsection(&Segtext, ".text", 05)
// Assign PCs in text segment.
// Could parallelize, by assigning to text
// and then letting threads copy down, but probably not worth it.
- sect = Segtext.Sect
+ sect := Segtext.Sect
sect.Align = int32(Funcalign)
Linklookup(Ctxt, "runtime.text", 0).Sect = sect
Linklookup(Ctxt, "runtime.etext", 0).Sect = sect
- va = uint64(INITTEXT)
+ va := uint64(INITTEXT)
sect.Vaddr = va
- for sym = Ctxt.Textp; sym != nil; sym = sym.Next {
+ for sym := Ctxt.Textp; sym != nil; sym = sym.Next {
sym.Sect = sect
if sym.Type&SSUB != 0 {
continue
// assign addresses
func address() {
- var s *Section
- var text *Section
- var data *Section
- var rodata *Section
- var symtab *Section
- var pclntab *Section
- var noptr *Section
- var bss *Section
- var noptrbss *Section
- var typelink *Section
- var sym *LSym
- var sub *LSym
- var va uint64
- var vlen int64
-
- va = uint64(INITTEXT)
+ va := uint64(INITTEXT)
Segtext.Rwx = 05
Segtext.Vaddr = va
Segtext.Fileoff = uint64(HEADR)
- for s = Segtext.Sect; s != nil; s = s.Next {
+ for s := Segtext.Sect; s != nil; s = s.Next {
va = uint64(Rnd(int64(va), int64(s.Align)))
s.Vaddr = va
va += s.Length
Segrodata.Vaddr = va
Segrodata.Fileoff = va - Segtext.Vaddr + Segtext.Fileoff
Segrodata.Filelen = 0
- for s = Segrodata.Sect; s != nil; s = s.Next {
+ for s := Segrodata.Sect; s != nil; s = s.Next {
va = uint64(Rnd(int64(va), int64(s.Align)))
s.Vaddr = va
va += s.Length
if HEADTYPE == Hplan9 {
Segdata.Fileoff = Segtext.Fileoff + Segtext.Filelen
}
- data = nil
- noptr = nil
- bss = nil
- noptrbss = nil
- for s = Segdata.Sect; s != nil; s = s.Next {
+ data := (*Section)(nil)
+ noptr := (*Section)(nil)
+ bss := (*Section)(nil)
+ noptrbss := (*Section)(nil)
+ var vlen int64
+ for s := Segdata.Sect; s != nil; s = s.Next {
vlen = int64(s.Length)
if s.Next != nil {
vlen = int64(s.Next.Vaddr - s.Vaddr)
Segdata.Filelen = bss.Vaddr - Segdata.Vaddr
- text = Segtext.Sect
+ text := Segtext.Sect
+ var rodata *Section
if Segrodata.Sect != nil {
rodata = Segrodata.Sect
} else {
rodata = text.Next
}
- typelink = rodata.Next
- symtab = typelink.Next
- pclntab = symtab.Next
+ typelink := rodata.Next
+ symtab := typelink.Next
+ pclntab := symtab.Next
- for sym = datap; sym != nil; sym = sym.Next {
+ var sub *LSym
+ for sym := datap; sym != nil; sym = sym.Next {
Ctxt.Cursym = sym
if sym.Sect != nil {
sym.Value += int64((sym.Sect.(*Section)).Vaddr)
xdefine("runtime.typelink", SRODATA, int64(typelink.Vaddr))
xdefine("runtime.etypelink", SRODATA, int64(typelink.Vaddr+typelink.Length))
- sym = Linklookup(Ctxt, "runtime.gcdata", 0)
+ sym := Linklookup(Ctxt, "runtime.gcdata", 0)
xdefine("runtime.egcdata", SRODATA, Symaddr(sym)+sym.Size)
Linklookup(Ctxt, "runtime.egcdata", 0).Sect = sym.Sect
// ../gc/reflect.c stuffs in these.
func decode_reloc(s *LSym, off int32) *Reloc {
- var i int
-
- for i = 0; i < len(s.R); i++ {
+ for i := 0; i < len(s.R); i++ {
if s.R[i].Off == off {
return &s.R[i:][0]
}
}
func decode_reloc_sym(s *LSym, off int32) *LSym {
- var r *Reloc
-
- r = decode_reloc(s, off)
+ r := decode_reloc(s, off)
if r == nil {
return nil
}
}
func decodetype_gcmask(s *LSym) []byte {
- var mask *LSym
-
- mask = decode_reloc_sym(s, 1*int32(Thearch.Ptrsize)+8+1*int32(Thearch.Ptrsize))
+ mask := decode_reloc_sym(s, 1*int32(Thearch.Ptrsize)+8+1*int32(Thearch.Ptrsize))
return mask.P
}
}
func decodetype_funcintype(s *LSym, i int) *LSym {
- var r *Reloc
-
- r = decode_reloc(s, int32(commonsize())+int32(Thearch.Ptrsize))
+ r := decode_reloc(s, int32(commonsize())+int32(Thearch.Ptrsize))
if r == nil {
return nil
}
}
func decodetype_funcouttype(s *LSym, i int) *LSym {
- var r *Reloc
-
- r = decode_reloc(s, int32(commonsize())+2*int32(Thearch.Ptrsize)+2*int32(Thearch.Intsize))
+ r := decode_reloc(s, int32(commonsize())+2*int32(Thearch.Ptrsize)+2*int32(Thearch.Intsize))
if r == nil {
return nil
}
// Type.StructType.fields[]-> name, typ and offset.
func decodetype_structfieldname(s *LSym, i int) string {
- var r *Reloc
-
// go.string."foo" 0x28 / 0x40
s = decode_reloc_sym(s, int32(commonsize())+int32(Thearch.Ptrsize)+2*int32(Thearch.Intsize)+int32(i)*int32(structfieldsize()))
if s == nil { // embedded structs have a nil name.
return ""
}
- r = decode_reloc(s, 0) // s has a pointer to the string data at offset 0
- if r == nil { // shouldn't happen.
+ r := decode_reloc(s, 0) // s has a pointer to the string data at offset 0
+ if r == nil { // shouldn't happen.
return ""
}
return cstring(r.Sym.P[r.Add:])
func uleb128enc(v uint64, dst []byte) int {
var c uint8
- var length uint8
- length = 0
+ length := uint8(0)
for {
c = uint8(v & 0x7f)
v >>= 7
func sleb128enc(v int64, dst []byte) int {
var c uint8
var s uint8
- var length uint8
- length = 0
+ length := uint8(0)
for {
c = uint8(v & 0x7f)
s = uint8(v & 0x40)
}
func writeabbrev() {
- var i int
var j int
var f *DWAttrForm
abbrevo = Cpos()
- for i = 1; i < DW_NABRV; i++ {
+ for i := 1; i < DW_NABRV; i++ {
// See section 7.5.3
uleb128put(int64(i))
)
func dwarfhashstr(s string) uint32 {
- var h uint32
-
- h = 0
+ h := uint32(0)
for s != "" {
h = h + h + h + uint32(s[0])
s = s[1:]
var dwglobals DWDie
func newattr(die *DWDie, attr uint16, cls int, value int64, data interface{}) *DWAttr {
- var a *DWAttr
-
- a = new(DWAttr)
+ a := new(DWAttr)
a.link = die.attr
die.attr = a
a.atr = attr
// name. getattr moves the desired one to the front so
// frequently searched ones are found faster.
func getattr(die *DWDie, attr uint16) *DWAttr {
- var a *DWAttr
- var b *DWAttr
-
if die.attr.atr == attr {
return die.attr
}
- a = die.attr
- b = a.link
+ a := die.attr
+ b := a.link
for b != nil {
if b.atr == attr {
a.link = b.link
// written out if it is listed in the abbrev). If its parent is
// keeping an index, the new DIE will be inserted there.
func newdie(parent *DWDie, abbrev int, name string) *DWDie {
- var die *DWDie
- var h int
-
- die = new(DWDie)
+ die := new(DWDie)
die.abbrev = abbrev
die.link = parent.child
parent.child = die
newattr(die, DW_AT_name, DW_CLS_STRING, int64(len(name)), name)
if parent.hash != nil {
- h = int(dwarfhashstr(name))
+ h := int(dwarfhashstr(name))
die.hlink = parent.hash[h]
parent.hash[h] = die
}
}
func walktypedef(die *DWDie) *DWDie {
- var attr *DWAttr
-
// Resolve typedef if present.
if die.abbrev == DW_ABRV_TYPEDECL {
- for attr = die.attr; attr != nil; attr = attr.link {
+ for attr := die.attr; attr != nil; attr = attr.link {
if attr.atr == DW_AT_type && attr.cls == DW_CLS_REFERENCE && attr.data != nil {
return attr.data.(*DWDie)
}
}
func find_or_diag(die *DWDie, name string) *DWDie {
- var r *DWDie
- r = find(die, name)
+ r := find(die, name)
if r == nil {
Diag("dwarf find: %s %p has no %s", getattr(die, DW_AT_name).data, die, name)
Errorexit()
}
func adddwarfrel(sec *LSym, sym *LSym, offsetbase int64, siz int, addend int64) {
- var r *Reloc
-
- r = Addrel(sec)
+ r := Addrel(sec)
r.Sym = sym
r.Xsym = sym
r.Off = int32(Cpos() - offsetbase)
var fwdcount int
func putattr(abbrev int, form int, cls int, value int64, data interface{}) {
- var off int64
- var p []byte
- var i int
-
switch form {
case DW_FORM_addr: // address
if Linkmode == LinkExternal {
value &= 0xff
Cput(uint8(value))
- p = data.([]byte)
- for i = 0; int64(i) < value; i++ {
+ p := data.([]byte)
+ for i := 0; int64(i) < value; i++ {
Cput(uint8(p[i]))
}
value &= 0xffff
Thearch.Wput(uint16(value))
- p = data.([]byte)
- for i = 0; int64(i) < value; i++ {
+ p := data.([]byte)
+ for i := 0; int64(i) < value; i++ {
Cput(uint8(p[i]))
}
value &= 0xffffffff
Thearch.Lput(uint32(value))
- p = data.([]byte)
- for i = 0; int64(i) < value; i++ {
+ p := data.([]byte)
+ for i := 0; int64(i) < value; i++ {
Cput(uint8(p[i]))
}
case DW_FORM_block: // block
uleb128put(value)
- p = data.([]byte)
- for i = 0; int64(i) < value; i++ {
+ p := data.([]byte)
+ for i := 0; int64(i) < value; i++ {
Cput(uint8(p[i]))
}
Thearch.Lput(0) // invalid dwarf, gdb will complain.
}
} else {
- off = (data.(*DWDie)).offs
+ off := (data.(*DWDie)).offs
if off == 0 {
fwdcount++
}
// Note that we can (and do) add arbitrary attributes to a DIE, but
// only the ones actually listed in the Abbrev will be written out.
func putattrs(abbrev int, attr *DWAttr) {
- var af []DWAttrForm
var ap *DWAttr
- for af = abbrevs[abbrev].attr[:]; af[0].attr != 0; af = af[1:] {
+ for af := abbrevs[abbrev].attr[:]; af[0].attr != 0; af = af[1:] {
for ap = attr; ap != nil; ap = ap.link {
if ap.atr == af[0].attr {
putattr(abbrev, int(af[0].form), int(ap.cls), ap.value, ap.data)
}
func reverselist(list **DWDie) {
- var curr *DWDie
- var prev *DWDie
-
- curr = *list
- prev = nil
+ curr := *list
+ prev := (*DWDie)(nil)
for curr != nil {
var next *DWDie = curr.link
curr.link = prev
}
func reversetree(list **DWDie) {
- var die *DWDie
-
reverselist(list)
- for die = *list; die != nil; die = die.link {
+ for die := *list; die != nil; die = die.link {
if abbrevs[die.abbrev].children != 0 {
reversetree(&die.child)
}
func newmemberoffsetattr(die *DWDie, offs int32) {
var block [20]byte
- var i int
- i = 0
+ i := 0
block[i] = DW_OP_plus_uconst
i++
i += uleb128enc(uint64(offs), block[i:])
// Lookup predefined types
func lookup_or_diag(n string) *LSym {
- var s *LSym
-
- s = Linkrlookup(Ctxt, n, 0)
+ s := Linkrlookup(Ctxt, n, 0)
if s == nil || s.Size == 0 {
Diag("dwarf: missing type: %s", n)
Errorexit()
}
func dotypedef(parent *DWDie, name string, def *DWDie) {
- var die *DWDie
-
// Only emit typedefs for real names.
if strings.HasPrefix(name, "map[") {
return
// so that future lookups will find the typedef instead
// of the real definition. This hooks the typedef into any
// circular definition loops, so that gdb can understand them.
- die = newdie(parent, DW_ABRV_TYPEDECL, name)
+ die := newdie(parent, DW_ABRV_TYPEDECL, name)
newrefattr(die, DW_AT_type, def)
}
// Define gotype, for composite ones recurse into constituents.
func defgotype(gotype *LSym) *DWDie {
- var die *DWDie
- var fld *DWDie
- var s *LSym
- var name string
- var f string
- var kind uint8
- var bytesize int64
- var i int
- var nfields int
-
if gotype == nil {
return find_or_diag(&dwtypes, "<unspecified>")
}
return find_or_diag(&dwtypes, "<unspecified>")
}
- name = gotype.Name[5:] // could also decode from Type.string
+ name := gotype.Name[5:] // could also decode from Type.string
- die = find(&dwtypes, name)
+ die := find(&dwtypes, name)
if die != nil {
return die
fmt.Printf("new type: %%Y\n", gotype)
}
- kind = decodetype_kind(gotype)
- bytesize = decodetype_size(gotype)
+ kind := decodetype_kind(gotype)
+ bytesize := decodetype_size(gotype)
switch kind {
case obj.KindBool:
die = newdie(&dwtypes, DW_ABRV_ARRAYTYPE, name)
dotypedef(&dwtypes, name, die)
newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
- s = decodetype_arrayelem(gotype)
+ s := decodetype_arrayelem(gotype)
newrefattr(die, DW_AT_type, defgotype(s))
- fld = newdie(die, DW_ABRV_ARRAYRANGE, "range")
+ fld := newdie(die, DW_ABRV_ARRAYRANGE, "range")
// use actual length not upper bound; correct for 0-length arrays.
newattr(fld, DW_AT_count, DW_CLS_CONSTANT, decodetype_arraylen(gotype), 0)
case obj.KindChan:
die = newdie(&dwtypes, DW_ABRV_CHANTYPE, name)
newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
- s = decodetype_chanelem(gotype)
+ s := decodetype_chanelem(gotype)
newrefattr(die, DW_AT_go_elem, defgotype(s))
case obj.KindFunc:
die = newdie(&dwtypes, DW_ABRV_FUNCTYPE, name)
dotypedef(&dwtypes, name, die)
newrefattr(die, DW_AT_type, find_or_diag(&dwtypes, "void"))
- nfields = decodetype_funcincount(gotype)
- for i = 0; i < nfields; i++ {
+ nfields := decodetype_funcincount(gotype)
+ var fld *DWDie
+ var s *LSym
+ for i := 0; i < nfields; i++ {
s = decodetype_funcintype(gotype, i)
fld = newdie(die, DW_ABRV_FUNCTYPEPARAM, s.Name[5:])
newrefattr(fld, DW_AT_type, defgotype(s))
newdie(die, DW_ABRV_DOTDOTDOT, "...")
}
nfields = decodetype_funcoutcount(gotype)
- for i = 0; i < nfields; i++ {
+ for i := 0; i < nfields; i++ {
s = decodetype_funcouttype(gotype, i)
fld = newdie(die, DW_ABRV_FUNCTYPEPARAM, s.Name[5:])
newrefattr(fld, DW_AT_type, defptrto(defgotype(s)))
die = newdie(&dwtypes, DW_ABRV_IFACETYPE, name)
dotypedef(&dwtypes, name, die)
newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
- nfields = int(decodetype_ifacemethodcount(gotype))
+ nfields := int(decodetype_ifacemethodcount(gotype))
+ var s *LSym
if nfields == 0 {
s = lookup_or_diag("type.runtime.eface")
} else {
case obj.KindMap:
die = newdie(&dwtypes, DW_ABRV_MAPTYPE, name)
- s = decodetype_mapkey(gotype)
+ s := decodetype_mapkey(gotype)
newrefattr(die, DW_AT_go_key, defgotype(s))
s = decodetype_mapvalue(gotype)
newrefattr(die, DW_AT_go_elem, defgotype(s))
case obj.KindPtr:
die = newdie(&dwtypes, DW_ABRV_PTRTYPE, name)
dotypedef(&dwtypes, name, die)
- s = decodetype_ptrelem(gotype)
+ s := decodetype_ptrelem(gotype)
newrefattr(die, DW_AT_type, defgotype(s))
case obj.KindSlice:
die = newdie(&dwtypes, DW_ABRV_SLICETYPE, name)
dotypedef(&dwtypes, name, die)
newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
- s = decodetype_arrayelem(gotype)
+ s := decodetype_arrayelem(gotype)
newrefattr(die, DW_AT_go_elem, defgotype(s))
case obj.KindString:
die = newdie(&dwtypes, DW_ABRV_STRUCTTYPE, name)
dotypedef(&dwtypes, name, die)
newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
- nfields = decodetype_structfieldcount(gotype)
- for i = 0; i < nfields; i++ {
+ nfields := decodetype_structfieldcount(gotype)
+ var f string
+ var fld *DWDie
+ var s *LSym
+ for i := 0; i < nfields; i++ {
f = decodetype_structfieldname(gotype, i)
s = decodetype_structfieldtype(gotype, i)
if f == "" {
// Find or construct *T given T.
func defptrto(dwtype *DWDie) *DWDie {
- var ptrname string
- var die *DWDie
-
- ptrname = fmt.Sprintf("*%s", getattr(dwtype, DW_AT_name).data)
- die = find(&dwtypes, ptrname)
+ ptrname := fmt.Sprintf("*%s", getattr(dwtype, DW_AT_name).data)
+ die := find(&dwtypes, ptrname)
if die == nil {
die = newdie(&dwtypes, DW_ABRV_PTRTYPE, ptrname)
newrefattr(die, DW_AT_type, dwtype)
// Search children (assumed to have DW_TAG_member) for the one named
// field and set its DW_AT_type to dwtype
func substitutetype(structdie *DWDie, field string, dwtype *DWDie) {
- var child *DWDie
- var a *DWAttr
-
- child = find_or_diag(structdie, field)
+ child := find_or_diag(structdie, field)
if child == nil {
return
}
- a = getattr(child, DW_AT_type)
+ a := getattr(child, DW_AT_type)
if a != nil {
a.data = dwtype
} else {
}
func synthesizestringtypes(die *DWDie) {
- var prototype *DWDie
-
- prototype = walktypedef(defgotype(lookup_or_diag("type.runtime._string")))
+ prototype := walktypedef(defgotype(lookup_or_diag("type.runtime._string")))
if prototype == nil {
return
}
}
func synthesizeslicetypes(die *DWDie) {
- var prototype *DWDie
- var elem *DWDie
-
- prototype = walktypedef(defgotype(lookup_or_diag("type.runtime.slice")))
+ prototype := walktypedef(defgotype(lookup_or_diag("type.runtime.slice")))
if prototype == nil {
return
}
+ var elem *DWDie
for ; die != nil; die = die.link {
if die.abbrev != DW_ABRV_SLICETYPE {
continue
func mkinternaltypename(base string, arg1 string, arg2 string) string {
var buf string
- var n string
if arg2 == "" {
buf = fmt.Sprintf("%s<%s>", base, arg1)
} else {
buf = fmt.Sprintf("%s<%s,%s>", base, arg1, arg2)
}
- n = buf
+ n := buf
return n
}
)
func synthesizemaptypes(die *DWDie) {
- var hash *DWDie
- var bucket *DWDie
+ hash := walktypedef(defgotype(lookup_or_diag("type.runtime.hmap")))
+ bucket := walktypedef(defgotype(lookup_or_diag("type.runtime.bmap")))
+
+ if hash == nil {
+ return
+ }
+
+ var a *DWAttr
var dwh *DWDie
+ var dwhb *DWDie
var dwhk *DWDie
var dwhv *DWDie
- var dwhb *DWDie
- var keytype *DWDie
- var valtype *DWDie
var fld *DWDie
- var t *DWDie
var indirect_key int
var indirect_val int
var keysize int
+ var keytype *DWDie
+ var t *DWDie
var valsize int
- var a *DWAttr
-
- hash = walktypedef(defgotype(lookup_or_diag("type.runtime.hmap")))
- bucket = walktypedef(defgotype(lookup_or_diag("type.runtime.bmap")))
-
- if hash == nil {
- return
- }
-
+ var valtype *DWDie
for ; die != nil; die = die.link {
if die.abbrev != DW_ABRV_MAPTYPE {
continue
}
func synthesizechantypes(die *DWDie) {
- var sudog *DWDie
- var waitq *DWDie
- var hchan *DWDie
- var dws *DWDie
- var dww *DWDie
- var dwh *DWDie
- var elemtype *DWDie
- var a *DWAttr
- var elemsize int
- var sudogsize int
-
- sudog = walktypedef(defgotype(lookup_or_diag("type.runtime.sudog")))
- waitq = walktypedef(defgotype(lookup_or_diag("type.runtime.waitq")))
- hchan = walktypedef(defgotype(lookup_or_diag("type.runtime.hchan")))
+ sudog := walktypedef(defgotype(lookup_or_diag("type.runtime.sudog")))
+ waitq := walktypedef(defgotype(lookup_or_diag("type.runtime.waitq")))
+ hchan := walktypedef(defgotype(lookup_or_diag("type.runtime.hchan")))
if sudog == nil || waitq == nil || hchan == nil {
return
}
- sudogsize = int(getattr(sudog, DW_AT_byte_size).value)
+ sudogsize := int(getattr(sudog, DW_AT_byte_size).value)
+ var a *DWAttr
+ var dwh *DWDie
+ var dws *DWDie
+ var dww *DWDie
+ var elemsize int
+ var elemtype *DWDie
for ; die != nil; die = die.link {
if die.abbrev != DW_ABRV_CHANTYPE {
continue
// For use with pass.c::genasmsym
func defdwsymb(sym *LSym, s string, t int, v int64, size int64, ver int, gotype *LSym) {
- var dv *DWDie
- var dt *DWDie
-
if strings.HasPrefix(s, "go.string.") {
return
}
return
}
- dv = nil
+ dv := (*DWDie)(nil)
+ var dt *DWDie
switch t {
default:
return
}
func movetomodule(parent *DWDie) {
- var die *DWDie
-
- die = dwroot.child.child
+ die := dwroot.child.child
for die.link != nil {
die = die.link
}
// If the pcln table contains runtime/runtime.go, use that to set gdbscript path.
func finddebugruntimepath(s *LSym) {
- var i int
- var p string
- var f *LSym
-
if gdbscript != "" {
return
}
- for i = 0; i < s.Pcln.Nfile; i++ {
+ var f *LSym
+ var p string
+ for i := 0; i < s.Pcln.Nfile; i++ {
f = s.Pcln.File[i]
_ = p
if i := strings.Index(f.Name, "runtime/runtime.go"); i >= 0 {
func newcfaoffsetattr(die *DWDie, offs int32) {
var block [20]byte
- var i int
- i = 0
+ i := 0
block[i] = DW_OP_call_frame_cfa
i++
}
func mkvarname(name string, da int) string {
- var buf string
- var n string
-
- buf = fmt.Sprintf("%s#%d", name, da)
- n = buf
+ buf := fmt.Sprintf("%s#%d", name, da)
+ n := buf
return n
}
// flush previous compilation unit.
func flushunit(dwinfo *DWDie, pc int64, pcsym *LSym, unitstart int64, header_length int32) {
- var here int64
-
if dwinfo != nil && pc != 0 {
newattr(dwinfo, DW_AT_high_pc, DW_CLS_ADDRESS, pc+1, pcsym)
}
uleb128put(1)
Cput(DW_LNE_end_sequence)
- here = Cpos()
+ here := Cpos()
Cseek(unitstart)
Thearch.Lput(uint32(here - unitstart - 4)) // unit_length
Thearch.Wput(2) // dwarf version
}
func writelines() {
- var s *LSym
- var epcs *LSym
- var a *Auto
- var unitstart int64
- var headerend int64
- var offs int64
- var pc int64
- var epc int64
- var i int
- var lang int
- var da int
- var dt int
- var line int
- var file int
- var dwinfo *DWDie
- var dwfunc *DWDie
- var dwvar *DWDie
- var dws **DWDie
- var varhash [HASHSIZE]*DWDie
- var n string
- var nn string
- var pcfile Pciter
- var pcline Pciter
- var files []*LSym
- var f *LSym
-
if linesec == nil {
linesec = Linklookup(Ctxt, ".dwarfline", 0)
}
linesec.R = linesec.R[:0]
- unitstart = -1
- headerend = -1
- epc = 0
- epcs = nil
+ unitstart := int64(-1)
+ headerend := int64(-1)
+ epc := int64(0)
+ epcs := (*LSym)(nil)
lineo = Cpos()
- dwinfo = nil
+ dwinfo := (*DWDie)(nil)
flushunit(dwinfo, epc, epcs, unitstart, int32(headerend-unitstart-10))
unitstart = Cpos()
- lang = DW_LANG_Go
+ lang := DW_LANG_Go
- s = Ctxt.Textp
+ s := Ctxt.Textp
dwinfo = newdie(&dwroot, DW_ABRV_COMPUNIT, "go")
newattr(dwinfo, DW_AT_language, DW_CLS_CONSTANT, int64(lang), 0)
Cput(1) // standard_opcode_lengths[9]
Cput(0) // include_directories (empty)
- files = make([]*LSym, Ctxt.Nhistfile)
+ files := make([]*LSym, Ctxt.Nhistfile)
- for f = Ctxt.Filesyms; f != nil; f = f.Next {
+ for f := Ctxt.Filesyms; f != nil; f = f.Next {
files[f.Value-1] = f
}
- for i = 0; int32(i) < Ctxt.Nhistfile; i++ {
+ for i := 0; int32(i) < Ctxt.Nhistfile; i++ {
strnput(files[i].Name, len(files[i].Name)+4)
}
uleb128put(1 + int64(Thearch.Ptrsize))
Cput(DW_LNE_set_address)
- pc = s.Value
- line = 1
- file = 1
+ pc := s.Value
+ line := 1
+ file := 1
if Linkmode == LinkExternal {
adddwarfrel(linesec, s, lineo, Thearch.Ptrsize, 0)
} else {
addrput(pc)
}
+ var a *Auto
+ var da int
+ var dt int
+ var dwfunc *DWDie
+ var dws **DWDie
+ var dwvar *DWDie
+ var n string
+ var nn string
+ var offs int64
+ var pcfile Pciter
+ var pcline Pciter
+ var varhash [HASHSIZE]*DWDie
for Ctxt.Cursym = Ctxt.Textp; Ctxt.Cursym != nil; Ctxt.Cursym = Ctxt.Cursym.Next {
s = Ctxt.Cursym
}
func writeframes() {
- var s *LSym
- var fdeo int64
- var fdesize int64
- var pad int64
- var pcsp Pciter
- var nextpc uint32
-
if framesec == nil {
framesec = Linklookup(Ctxt, ".dwarfframe", 0)
}
uleb128put(int64(-Thearch.Ptrsize) / DATAALIGNMENTFACTOR) // at cfa - x*4
// 4 is to exclude the length field.
- pad = CIERESERVE + frameo + 4 - Cpos()
+ pad := CIERESERVE + frameo + 4 - Cpos()
if pad < 0 {
Diag("dwarf: CIERESERVE too small by %d bytes.", -pad)
strnput("", int(pad))
+ var fdeo int64
+ var fdesize int64
+ var nextpc uint32
+ var pcsp Pciter
+ var s *LSym
for Ctxt.Cursym = Ctxt.Textp; Ctxt.Cursym != nil; Ctxt.Cursym = Ctxt.Cursym.Next {
s = Ctxt.Cursym
if s.Pcln == nil {
)
func writeinfo() {
- var compunit *DWDie
- var unitstart int64
- var here int64
-
fwdcount = 0
if infosec == nil {
infosec = Linklookup(Ctxt, ".dwarfinfo", 0)
}
arangessec.R = arangessec.R[:0]
- for compunit = dwroot.child; compunit != nil; compunit = compunit.link {
+ var here int64
+ var unitstart int64
+ for compunit := dwroot.child; compunit != nil; compunit = compunit.link {
unitstart = Cpos()
// Write .debug_info Compilation Unit Header (sec 7.5.1)
* because we need die->offs and infoo/infosize;
*/
func ispubname(die *DWDie) bool {
- var a *DWAttr
-
switch die.abbrev {
case DW_ABRV_FUNCTION,
DW_ABRV_VARIABLE:
- a = getattr(die, DW_AT_external)
+ a := getattr(die, DW_AT_external)
return a != nil && a.value != 0
}
}
func writepub(ispub func(*DWDie) bool) int64 {
- var compunit *DWDie
var die *DWDie
var dwa *DWAttr
var unitstart int64
var unitend int64
- var sectionstart int64
var here int64
- sectionstart = Cpos()
+ sectionstart := Cpos()
- for compunit = dwroot.child; compunit != nil; compunit = compunit.link {
+ for compunit := dwroot.child; compunit != nil; compunit = compunit.link {
unitstart = compunit.offs - COMPUNITHEADERSIZE
if compunit.link != nil {
unitend = compunit.link.offs - COMPUNITHEADERSIZE
* because we need die->offs of dw_globals.
*/
func writearanges() int64 {
- var compunit *DWDie
var b *DWAttr
var e *DWAttr
- var headersize int
- var sectionstart int64
var value int64
- sectionstart = Cpos()
- headersize = int(Rnd(4+2+4+1+1, int64(Thearch.Ptrsize))) // don't count unit_length field itself
+ sectionstart := Cpos()
+ headersize := int(Rnd(4+2+4+1+1, int64(Thearch.Ptrsize))) // don't count unit_length field itself
- for compunit = dwroot.child; compunit != nil; compunit = compunit.link {
+ for compunit := dwroot.child; compunit != nil; compunit = compunit.link {
b = getattr(compunit, DW_AT_low_pc)
if b == nil {
continue
}
func writegdbscript() int64 {
- var sectionstart int64
-
- sectionstart = Cpos()
+ sectionstart := Cpos()
if gdbscript != "" {
Cput(1) // magic 1 byte?
func writedwarfreloc(s *LSym) int64 {
var i int
- var ri int
- var start int64
var r *Reloc
- start = Cpos()
- for ri = 0; ri < len(s.R); ri++ {
+ start := Cpos()
+ for ri := 0; ri < len(s.R); ri++ {
r = &s.R[ri]
if Iself {
i = Thearch.Elfreloc1(r, int64(r.Off))
*
*/
func Dwarfemitdebugsections() {
- var infoe int64
- var die *DWDie
-
if Debug['w'] != 0 { // disable dwarf
return
}
newdie(&dwtypes, DW_ABRV_NULLTYPE, "void")
newdie(&dwtypes, DW_ABRV_BARE_PTRTYPE, "unsafe.Pointer")
- die = newdie(&dwtypes, DW_ABRV_BASETYPE, "uintptr") // needed for array size
+ die := newdie(&dwtypes, DW_ABRV_BASETYPE, "uintptr") // needed for array size
newattr(die, DW_AT_encoding, DW_CLS_CONSTANT, DW_ATE_unsigned, 0)
newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, int64(Thearch.Ptrsize), 0)
newattr(die, DW_AT_go_kind, DW_CLS_CONSTANT, obj.KindUintptr, 0)
infoo = Cpos()
writeinfo()
- infoe = Cpos()
+ infoe := Cpos()
pubnameso = infoe
pubtypeso = infoe
arangeso = infoe
}
func dwarfaddelfrelocheader(elfstr int, shdata *ElfShdr, off int64, size int64) {
- var sh *ElfShdr
-
- sh = newElfShdr(elfstrdbg[elfstr])
+ sh := newElfShdr(elfstrdbg[elfstr])
if Thearch.Thechar == '6' || Thearch.Thechar == '9' {
sh.type_ = SHT_RELA
} else {
}
func dwarfaddelfheaders() {
- var sh *ElfShdr
- var shinfo *ElfShdr
- var sharanges *ElfShdr
- var shline *ElfShdr
- var shframe *ElfShdr
-
if Debug['w'] != 0 { // disable dwarf
return
}
- sh = newElfShdr(elfstrdbg[ElfStrDebugAbbrev])
+ sh := newElfShdr(elfstrdbg[ElfStrDebugAbbrev])
sh.type_ = SHT_PROGBITS
sh.off = uint64(abbrevo)
sh.size = uint64(abbrevsize)
if linesympos > 0 {
putelfsymshndx(linesympos, sh.shnum)
}
- shline = sh
+ shline := sh
sh = newElfShdr(elfstrdbg[ElfStrDebugFrame])
sh.type_ = SHT_PROGBITS
if framesympos > 0 {
putelfsymshndx(framesympos, sh.shnum)
}
- shframe = sh
+ shframe := sh
sh = newElfShdr(elfstrdbg[ElfStrDebugInfo])
sh.type_ = SHT_PROGBITS
if infosympos > 0 {
putelfsymshndx(infosympos, sh.shnum)
}
- shinfo = sh
+ shinfo := sh
if pubnamessize > 0 {
- sh = newElfShdr(elfstrdbg[ElfStrDebugPubNames])
+ sh := newElfShdr(elfstrdbg[ElfStrDebugPubNames])
sh.type_ = SHT_PROGBITS
sh.off = uint64(pubnameso)
sh.size = uint64(pubnamessize)
}
if pubtypessize > 0 {
- sh = newElfShdr(elfstrdbg[ElfStrDebugPubTypes])
+ sh := newElfShdr(elfstrdbg[ElfStrDebugPubTypes])
sh.type_ = SHT_PROGBITS
sh.off = uint64(pubtypeso)
sh.size = uint64(pubtypessize)
sh.addralign = 1
}
- sharanges = nil
+ sharanges := (*ElfShdr)(nil)
if arangessize != 0 {
- sh = newElfShdr(elfstrdbg[ElfStrDebugAranges])
+ sh := newElfShdr(elfstrdbg[ElfStrDebugAranges])
sh.type_ = SHT_PROGBITS
sh.off = uint64(arangeso)
sh.size = uint64(arangessize)
}
if gdbscriptsize != 0 {
- sh = newElfShdr(elfstrdbg[ElfStrGDBScripts])
+ sh := newElfShdr(elfstrdbg[ElfStrGDBScripts])
sh.type_ = SHT_PROGBITS
sh.off = uint64(gdbscripto)
sh.size = uint64(gdbscriptsize)
* Macho
*/
func dwarfaddmachoheaders() {
- var msect *MachoSect
- var ms *MachoSeg
- var fakestart int64
- var nsect int
-
if Debug['w'] != 0 { // disable dwarf
return
}
// Zero vsize segments won't be loaded in memory, even so they
// have to be page aligned in the file.
- fakestart = abbrevo &^ 0xfff
+ fakestart := abbrevo &^ 0xfff
- nsect = 4
+ nsect := 4
if pubnamessize > 0 {
nsect++
}
nsect++
}
- ms = newMachoSeg("__DWARF", nsect)
+ ms := newMachoSeg("__DWARF", nsect)
ms.fileoffset = uint64(fakestart)
ms.filesize = uint64(abbrevo) - uint64(fakestart)
ms.vaddr = ms.fileoffset + Segdata.Vaddr - Segdata.Fileoff
- msect = newMachoSect(ms, "__debug_abbrev", "__DWARF")
+ msect := newMachoSect(ms, "__debug_abbrev", "__DWARF")
msect.off = uint32(abbrevo)
msect.size = uint64(abbrevsize)
msect.addr = uint64(msect.off) + Segdata.Vaddr - Segdata.Fileoff
ms.filesize += msect.size
if pubnamessize > 0 {
- msect = newMachoSect(ms, "__debug_pubnames", "__DWARF")
+ msect := newMachoSect(ms, "__debug_pubnames", "__DWARF")
msect.off = uint32(pubnameso)
msect.size = uint64(pubnamessize)
msect.addr = uint64(msect.off) + Segdata.Vaddr - Segdata.Fileoff
}
if pubtypessize > 0 {
- msect = newMachoSect(ms, "__debug_pubtypes", "__DWARF")
+ msect := newMachoSect(ms, "__debug_pubtypes", "__DWARF")
msect.off = uint32(pubtypeso)
msect.size = uint64(pubtypessize)
msect.addr = uint64(msect.off) + Segdata.Vaddr - Segdata.Fileoff
}
if arangessize > 0 {
- msect = newMachoSect(ms, "__debug_aranges", "__DWARF")
+ msect := newMachoSect(ms, "__debug_aranges", "__DWARF")
msect.off = uint32(arangeso)
msect.size = uint64(arangessize)
msect.addr = uint64(msect.off) + Segdata.Vaddr - Segdata.Fileoff
// TODO(lvd) fix gdb/python to load MachO (16 char section name limit)
if gdbscriptsize > 0 {
- msect = newMachoSect(ms, "__debug_gdb_scripts", "__DWARF")
+ msect := newMachoSect(ms, "__debug_gdb_scripts", "__DWARF")
msect.off = uint32(gdbscripto)
msect.size = uint64(gdbscriptsize)
msect.addr = uint64(msect.off) + Segdata.Vaddr - Segdata.Fileoff
}
func elf32phdr(e *ElfPhdr) {
- var frag int
-
if e.type_ == PT_LOAD {
// Correct ELF loaders will do this implicitly,
// but buggy ELF loaders like the one in some
// versions of QEMU won't.
- frag = int(e.vaddr & (e.align - 1))
+ frag := int(e.vaddr & (e.align - 1))
e.off -= uint64(frag)
e.vaddr -= uint64(frag)
}
func elfwriteshdrs() uint32 {
- var i int
-
if elf64 != 0 {
- for i = 0; i < int(ehdr.shnum); i++ {
+ for i := 0; i < int(ehdr.shnum); i++ {
elf64shdr(shdr[i])
}
return uint32(ehdr.shnum) * ELF64SHDRSIZE
}
- for i = 0; i < int(ehdr.shnum); i++ {
+ for i := 0; i < int(ehdr.shnum); i++ {
elf32shdr(shdr[i])
}
return uint32(ehdr.shnum) * ELF32SHDRSIZE
}
func elfwritephdrs() uint32 {
- var i int
-
if elf64 != 0 {
- for i = 0; i < int(ehdr.phnum); i++ {
+ for i := 0; i < int(ehdr.phnum); i++ {
elf64phdr(phdr[i])
}
return uint32(ehdr.phnum) * ELF64PHDRSIZE
}
- for i = 0; i < int(ehdr.phnum); i++ {
+ for i := 0; i < int(ehdr.phnum); i++ {
elf32phdr(phdr[i])
}
return uint32(ehdr.phnum) * ELF32PHDRSIZE
}
func newElfPhdr() *ElfPhdr {
- var e *ElfPhdr
-
- e = new(ElfPhdr)
+ e := new(ElfPhdr)
if ehdr.phnum >= NSECT {
Diag("too many phdrs")
} else {
}
func newElfShdr(name int64) *ElfShdr {
- var e *ElfShdr
-
- e = new(ElfShdr)
+ e := new(ElfShdr)
e.name = uint32(name)
e.shnum = int(ehdr.shnum)
if ehdr.shnum >= NSECT {
}
func elf64writehdr() uint32 {
- var i int
-
- for i = 0; i < EI_NIDENT; i++ {
+ for i := 0; i < EI_NIDENT; i++ {
Cput(ehdr.ident[i])
}
Thearch.Wput(ehdr.type_)
}
func elf32writehdr() uint32 {
- var i int
-
- for i = 0; i < EI_NIDENT; i++ {
+ for i := 0; i < EI_NIDENT; i++ {
Cput(ehdr.ident[i])
}
Thearch.Wput(ehdr.type_)
}
func elfinterp(sh *ElfShdr, startva uint64, resoff uint64, p string) int {
- var n int
-
interp = p
- n = len(interp) + 1
+ n := len(interp) + 1
sh.addr = startva + resoff - uint64(n)
sh.off = resoff - uint64(n)
sh.size = uint64(n)
}
func elfwriteinterp() int {
- var sh *ElfShdr
-
- sh = elfshname(".interp")
+ sh := elfshname(".interp")
Cseek(int64(sh.off))
coutbuf.w.WriteString(interp)
Cput(0)
}
func elfnote(sh *ElfShdr, startva uint64, resoff uint64, sz int) int {
- var n uint64
-
- n = 3*4 + uint64(sz) + resoff%4
+ n := 3*4 + uint64(sz) + resoff%4
sh.type_ = SHT_NOTE
sh.flags = SHF_ALLOC
}
func elfwritenotehdr(str string, namesz uint32, descsz uint32, tag uint32) *ElfShdr {
- var sh *ElfShdr
-
- sh = elfshname(str)
+ sh := elfshname(str)
// Write Elf_Note header.
Cseek(int64(sh.off))
var ELF_NOTE_NETBSD_NAME = []byte("NetBSD\x00")
func elfnetbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int {
- var n int
-
- n = int(Rnd(ELF_NOTE_NETBSD_NAMESZ, 4) + Rnd(ELF_NOTE_NETBSD_DESCSZ, 4))
+ n := int(Rnd(ELF_NOTE_NETBSD_NAMESZ, 4) + Rnd(ELF_NOTE_NETBSD_DESCSZ, 4))
return elfnote(sh, startva, resoff, n)
}
func elfwritenetbsdsig() int {
- var sh *ElfShdr
-
// Write Elf_Note header.
- sh = elfwritenotehdr(".note.netbsd.ident", ELF_NOTE_NETBSD_NAMESZ, ELF_NOTE_NETBSD_DESCSZ, ELF_NOTE_NETBSD_TAG)
+ sh := elfwritenotehdr(".note.netbsd.ident", ELF_NOTE_NETBSD_NAMESZ, ELF_NOTE_NETBSD_DESCSZ, ELF_NOTE_NETBSD_TAG)
if sh == nil {
return 0
var ELF_NOTE_OPENBSD_NAME = []byte("OpenBSD\x00")
func elfopenbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int {
- var n int
-
- n = ELF_NOTE_OPENBSD_NAMESZ + ELF_NOTE_OPENBSD_DESCSZ
+ n := ELF_NOTE_OPENBSD_NAMESZ + ELF_NOTE_OPENBSD_DESCSZ
return elfnote(sh, startva, resoff, n)
}
func elfwriteopenbsdsig() int {
- var sh *ElfShdr
-
// Write Elf_Note header.
- sh = elfwritenotehdr(".note.openbsd.ident", ELF_NOTE_OPENBSD_NAMESZ, ELF_NOTE_OPENBSD_DESCSZ, ELF_NOTE_OPENBSD_TAG)
+ sh := elfwritenotehdr(".note.openbsd.ident", ELF_NOTE_OPENBSD_NAMESZ, ELF_NOTE_OPENBSD_DESCSZ, ELF_NOTE_OPENBSD_TAG)
if sh == nil {
return 0
}
func addbuildinfo(val string) {
- var ov string
- var i int
- var b int
var j int
if val[0] != '0' || val[1] != 'x' {
Exit(2)
}
- ov = val
+ ov := val
val = val[2:]
- i = 0
+ i := 0
+ var b int
for val != "" {
if len(val) == 1 {
fmt.Fprintf(os.Stderr, "%s: -B argument must have even number of digits: %s\n", os.Args[0], ov)
var ELF_NOTE_BUILDINFO_NAME = []byte("GNU\x00")
func elfbuildinfo(sh *ElfShdr, startva uint64, resoff uint64) int {
- var n int
-
- n = int(ELF_NOTE_BUILDINFO_NAMESZ + Rnd(int64(len(buildinfo)), 4))
+ n := int(ELF_NOTE_BUILDINFO_NAMESZ + Rnd(int64(len(buildinfo)), 4))
return elfnote(sh, startva, resoff, n)
}
func elfwritebuildinfo() int {
- var sh *ElfShdr
-
- sh = elfwritenotehdr(".note.gnu.build-id", ELF_NOTE_BUILDINFO_NAMESZ, uint32(len(buildinfo)), ELF_NOTE_BUILDINFO_TAG)
+ sh := elfwritenotehdr(".note.gnu.build-id", ELF_NOTE_BUILDINFO_NAMESZ, uint32(len(buildinfo)), ELF_NOTE_BUILDINFO_TAG)
if sh == nil {
return 0
}
func addelflib(list **Elflib, file string, vers string) *Elfaux {
var lib *Elflib
- var aux *Elfaux
for lib = *list; lib != nil; lib = lib.next {
if lib.file == file {
*list = lib
havelib:
- for aux = lib.aux; aux != nil; aux = aux.next {
+ for aux := lib.aux; aux != nil; aux = aux.next {
if aux.vers == vers {
- goto haveaux
+ return aux
}
}
- aux = new(Elfaux)
+ aux := new(Elfaux)
aux.next = lib.aux
aux.vers = vers
lib.aux = aux
-haveaux:
return aux
}
func elfdynhash() {
- var s *LSym
- var sy *LSym
- var dynstr *LSym
- var i int
- var j int
- var nbucket int
- var b int
- var nfile int
- var hc uint32
- var chain []uint32
- var buckets []uint32
- var nsym int
- var name string
- var need []*Elfaux
- var needlib *Elflib
- var l *Elflib
- var x *Elfaux
-
if !Iself {
return
}
- nsym = Nelfsym
- s = Linklookup(Ctxt, ".hash", 0)
+ nsym := Nelfsym
+ s := Linklookup(Ctxt, ".hash", 0)
s.Type = SELFROSECT
s.Reachable = true
- i = nsym
- nbucket = 1
+ i := nsym
+ nbucket := 1
for i > 0 {
nbucket++
i >>= 1
}
- needlib = nil
- need = make([]*Elfaux, nsym)
- chain = make([]uint32, nsym)
- buckets = make([]uint32, nbucket)
+ needlib := (*Elflib)(nil)
+ need := make([]*Elfaux, nsym)
+ chain := make([]uint32, nsym)
+ buckets := make([]uint32, nbucket)
if need == nil || chain == nil || buckets == nil {
Ctxt.Cursym = nil
Diag("out of memory")
Errorexit()
}
- for i = 0; i < nsym; i++ {
+ for i := 0; i < nsym; i++ {
need[i] = nil
}
- for i = 0; i < nsym; i++ {
+ for i := 0; i < nsym; i++ {
chain[i] = 0
}
- for i = 0; i < nbucket; i++ {
+ for i := 0; i < nbucket; i++ {
buckets[i] = 0
}
- for sy = Ctxt.Allsym; sy != nil; sy = sy.Allsym {
+ var b int
+ var hc uint32
+ var name string
+ for sy := Ctxt.Allsym; sy != nil; sy = sy.Allsym {
if sy.Dynid <= 0 {
continue
}
Adduint32(Ctxt, s, uint32(nbucket))
Adduint32(Ctxt, s, uint32(nsym))
- for i = 0; i < nbucket; i++ {
+ for i := 0; i < nbucket; i++ {
Adduint32(Ctxt, s, buckets[i])
}
- for i = 0; i < nsym; i++ {
+ for i := 0; i < nsym; i++ {
Adduint32(Ctxt, s, chain[i])
}
// version symbols
- dynstr = Linklookup(Ctxt, ".dynstr", 0)
+ dynstr := Linklookup(Ctxt, ".dynstr", 0)
s = Linklookup(Ctxt, ".gnu.version_r", 0)
i = 2
- nfile = 0
- for l = needlib; l != nil; l = l.next {
+ nfile := 0
+ var j int
+ var x *Elfaux
+ for l := needlib; l != nil; l = l.next {
nfile++
// header
// version references
s = Linklookup(Ctxt, ".gnu.version", 0)
- for i = 0; i < nsym; i++ {
+ for i := 0; i < nsym; i++ {
if i == 0 {
Adduint16(Ctxt, s, 0) // first entry - no symbol
} else if need[i] == nil {
}
if Thearch.Thechar == '6' || Thearch.Thechar == '9' {
- sy = Linklookup(Ctxt, ".rela.plt", 0)
+ sy := Linklookup(Ctxt, ".rela.plt", 0)
if sy.Size > 0 {
Elfwritedynent(s, DT_PLTREL, DT_RELA)
elfwritedynentsymsize(s, DT_PLTRELSZ, sy)
elfwritedynentsym(s, DT_JMPREL, sy)
}
} else {
- sy = Linklookup(Ctxt, ".rel.plt", 0)
+ sy := Linklookup(Ctxt, ".rel.plt", 0)
if sy.Size > 0 {
Elfwritedynent(s, DT_PLTREL, DT_REL)
elfwritedynentsymsize(s, DT_PLTRELSZ, sy)
}
func elfphload(seg *Segment) *ElfPhdr {
- var ph *ElfPhdr
-
- ph = newElfPhdr()
+ ph := newElfPhdr()
ph.type_ = PT_LOAD
if seg.Rwx&4 != 0 {
ph.flags |= PF_R
}
func elfshname(name string) *ElfShdr {
- var i int
var off int
var sh *ElfShdr
- for i = 0; i < nelfstr; i++ {
+ for i := 0; i < nelfstr; i++ {
if name == elfstr[i].s {
off = elfstr[i].off
- goto found
+ for i = 0; i < int(ehdr.shnum); i++ {
+ sh = shdr[i]
+ if sh.name == uint32(off) {
+ return sh
+ }
+ }
+
+ sh = newElfShdr(int64(off))
+ return sh
}
}
Diag("cannot find elf name %s", name)
Errorexit()
return nil
-
-found:
- for i = 0; i < int(ehdr.shnum); i++ {
- sh = shdr[i]
- if sh.name == uint32(off) {
- return sh
- }
- }
-
- sh = newElfShdr(int64(off))
- return sh
}
func elfshalloc(sect *Section) *ElfShdr {
- var sh *ElfShdr
-
- sh = elfshname(sect.Name)
+ sh := elfshname(sect.Name)
sect.Elfsect = sh
return sh
}
func elfshbits(sect *Section) *ElfShdr {
- var sh *ElfShdr
-
- sh = elfshalloc(sect)
+ sh := elfshalloc(sect)
if sh.type_ > 0 {
return sh
}
}
func elfshreloc(sect *Section) *ElfShdr {
- var typ int
- var sh *ElfShdr
- var prefix string
- var buf string
-
// If main section is SHT_NOBITS, nothing to relocate.
// Also nothing to relocate in .shstrtab.
if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
return nil
}
+ var prefix string
+ var typ int
if Thearch.Thechar == '6' || Thearch.Thechar == '9' {
prefix = ".rela"
typ = SHT_RELA
typ = SHT_REL
}
- buf = fmt.Sprintf("%s%s", prefix, sect.Name)
- sh = elfshname(buf)
+ buf := fmt.Sprintf("%s%s", prefix, sect.Name)
+ sh := elfshname(buf)
sh.type_ = uint32(typ)
sh.entsize = uint64(Thearch.Regsize) * 2
if typ == SHT_RELA {
}
func elfrelocsect(sect *Section, first *LSym) {
- var ri int
- var sym *LSym
- var eaddr int32
- var r *Reloc
-
// If main section is SHT_NOBITS, nothing to relocate.
// Also nothing to relocate in .shstrtab.
if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
}
sect.Reloff = uint64(Cpos())
+ var sym *LSym
for sym = first; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
}
}
- eaddr = int32(sect.Vaddr + sect.Length)
+ eaddr := int32(sect.Vaddr + sect.Length)
+ var r *Reloc
+ var ri int
for ; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
}
func Elfemitreloc() {
- var sect *Section
-
for Cpos()&7 != 0 {
Cput(0)
}
elfrelocsect(Segtext.Sect, Ctxt.Textp)
- for sect = Segtext.Sect.Next; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect.Next; sect != nil; sect = sect.Next {
elfrelocsect(sect, datap)
}
- for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
elfrelocsect(sect, datap)
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
elfrelocsect(sect, datap)
}
}
func doelf() {
- var s *LSym
- var shstrtab *LSym
- var dynstr *LSym
-
if !Iself {
return
}
/* predefine strings we need for section headers */
- shstrtab = Linklookup(Ctxt, ".shstrtab", 0)
+ shstrtab := Linklookup(Ctxt, ".shstrtab", 0)
shstrtab.Type = SELFROSECT
shstrtab.Reachable = true
Addstring(shstrtab, ".gnu.version_r")
/* dynamic symbol table - first entry all zeros */
- s = Linklookup(Ctxt, ".dynsym", 0)
+ s := Linklookup(Ctxt, ".dynsym", 0)
s.Type = SELFROSECT
s.Reachable = true
if s.Size == 0 {
Addstring(s, "")
}
- dynstr = s
+ dynstr := s
/* relocation table */
if Thearch.Thechar == '6' || Thearch.Thechar == '9' {
/* ppc64 glink resolver */
if Thearch.Thechar == '9' {
- s = Linklookup(Ctxt, ".glink", 0)
+ s := Linklookup(Ctxt, ".glink", 0)
s.Reachable = true
s.Type = SELFRXSECT
}
// Do not write DT_NULL. elfdynhash will finish it.
func shsym(sh *ElfShdr, s *LSym) {
- var addr int64
- addr = Symaddr(s)
+ addr := Symaddr(s)
if sh.flags&SHF_ALLOC != 0 {
sh.addr = uint64(addr)
}
}
func Asmbelfsetup() {
- var sect *Section
-
/* This null SHdr must appear before all others */
elfshname("")
- for sect = Segtext.Sect; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
elfshalloc(sect)
}
- for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
elfshalloc(sect)
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
elfshalloc(sect)
}
}
func Asmbelf(symo int64) {
- var a int64
- var o int64
- var startva int64
- var resoff int64
- var eh *ElfEhdr
- var ph *ElfPhdr
- var pph *ElfPhdr
- var pnote *ElfPhdr
- var sh *ElfShdr
- var sect *Section
-
- eh = getElfEhdr()
+ eh := getElfEhdr()
switch Thearch.Thechar {
default:
Diag("unknown architecture in asmbelf")
eh.machine = EM_PPC64
}
- startva = INITTEXT - int64(HEADR)
- resoff = ELFRESERVE
+ startva := INITTEXT - int64(HEADR)
+ resoff := int64(ELFRESERVE)
- pph = nil
+ pph := (*ElfPhdr)(nil)
+ var pnote *ElfPhdr
if Linkmode == LinkExternal {
/* skip program headers */
eh.phoff = 0
* Except on NaCl where it must not be loaded.
*/
if HEADTYPE != Hnacl {
- o = int64(Segtext.Vaddr - pph.vaddr)
+ o := int64(Segtext.Vaddr - pph.vaddr)
Segtext.Vaddr -= uint64(o)
Segtext.Length += uint64(o)
o = int64(Segtext.Fileoff - pph.off)
if Debug['d'] == 0 {
/* interpreter */
- sh = elfshname(".interp")
+ sh := elfshname(".interp")
sh.type_ = SHT_PROGBITS
sh.flags = SHF_ALLOC
resoff -= int64(elfinterp(sh, uint64(startva), uint64(resoff), interpreter))
- ph = newElfPhdr()
+ ph := newElfPhdr()
ph.type_ = PT_INTERP
ph.flags = PF_R
phsh(ph, sh)
pnote = nil
if HEADTYPE == Hnetbsd || HEADTYPE == Hopenbsd {
- sh = nil
+ sh := (*ElfShdr)(nil)
switch HEADTYPE {
case Hnetbsd:
sh = elfshname(".note.netbsd.ident")
}
if len(buildinfo) > 0 {
- sh = elfshname(".note.gnu.build-id")
+ sh := elfshname(".note.gnu.build-id")
resoff -= int64(elfbuildinfo(sh, uint64(startva), uint64(resoff)))
if pnote == nil {
/* Dynamic linking sections */
if Debug['d'] == 0 {
- sh = elfshname(".dynsym")
+ sh := elfshname(".dynsym")
sh.type_ = SHT_DYNSYM
sh.flags = SHF_ALLOC
if elf64 != 0 {
shsym(sh, Linklookup(Ctxt, ".dynstr", 0))
if elfverneed != 0 {
- sh = elfshname(".gnu.version")
+ sh := elfshname(".gnu.version")
sh.type_ = SHT_GNU_VERSYM
sh.flags = SHF_ALLOC
sh.addralign = 2
switch eh.machine {
case EM_X86_64,
EM_PPC64:
- sh = elfshname(".rela.plt")
+ sh := elfshname(".rela.plt")
sh.type_ = SHT_RELA
sh.flags = SHF_ALLOC
sh.entsize = ELF64RELASIZE
shsym(sh, Linklookup(Ctxt, ".rela", 0))
default:
- sh = elfshname(".rel.plt")
+ sh := elfshname(".rel.plt")
sh.type_ = SHT_REL
sh.flags = SHF_ALLOC
sh.entsize = ELF32RELSIZE
}
if eh.machine == EM_PPC64 {
- sh = elfshname(".glink")
+ sh := elfshname(".glink")
sh.type_ = SHT_PROGBITS
sh.flags = SHF_ALLOC + SHF_EXECINSTR
sh.addralign = 4
// On ppc64, .got comes from the input files, so don't
// create it here, and .got.plt is not used.
if eh.machine != EM_PPC64 {
- sh = elfshname(".got")
+ sh := elfshname(".got")
sh.type_ = SHT_PROGBITS
sh.flags = SHF_ALLOC + SHF_WRITE
sh.entsize = uint64(Thearch.Regsize)
sh.addralign = uint64(Thearch.Regsize)
sh.link = uint32(elfshname(".dynstr").shnum)
shsym(sh, Linklookup(Ctxt, ".dynamic", 0))
- ph = newElfPhdr()
+ ph := newElfPhdr()
ph.type_ = PT_DYNAMIC
ph.flags = PF_R + PF_W
phsh(ph, sh)
// not currently support it. This is handled
// appropriately in runtime/cgo.
if Ctxt.Tlsoffset != 0 && HEADTYPE != Hopenbsd {
- ph = newElfPhdr()
+ ph := newElfPhdr()
ph.type_ = PT_TLS
ph.flags = PF_R
ph.memsz = uint64(-Ctxt.Tlsoffset)
}
if HEADTYPE == Hlinux {
- ph = newElfPhdr()
+ ph := newElfPhdr()
ph.type_ = PT_GNU_STACK
ph.flags = PF_W + PF_R
ph.align = uint64(Thearch.Regsize)
}
elfobj:
- sh = elfshname(".shstrtab")
+ sh := elfshname(".shstrtab")
sh.type_ = SHT_STRTAB
sh.addralign = 1
shsym(sh, Linklookup(Ctxt, ".shstrtab", 0))
elfshname(".strtab")
}
- for sect = Segtext.Sect; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
elfshbits(sect)
}
- for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
elfshbits(sect)
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
elfshbits(sect)
}
if Linkmode == LinkExternal {
- for sect = Segtext.Sect; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
elfshreloc(sect)
}
- for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
elfshreloc(sect)
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
elfshreloc(sect)
}
// add a .note.GNU-stack section to mark the stack as non-executable
- sh = elfshname(".note.GNU-stack")
+ sh := elfshname(".note.GNU-stack")
sh.type_ = SHT_PROGBITS
sh.addralign = 1
// generate .tbss section for dynamic internal linking (except for OpenBSD)
// external linking generates .tbss in data.c
if Linkmode == LinkInternal && Debug['d'] == 0 && HEADTYPE != Hopenbsd {
- sh = elfshname(".tbss")
+ sh := elfshname(".tbss")
sh.type_ = SHT_NOBITS
sh.addralign = uint64(Thearch.Regsize)
sh.size = uint64(-Ctxt.Tlsoffset)
}
if Debug['s'] == 0 {
- sh = elfshname(".symtab")
+ sh := elfshname(".symtab")
sh.type_ = SHT_SYMTAB
sh.off = uint64(symo)
sh.size = uint64(Symsize)
}
Cseek(0)
- a = 0
+ a := int64(0)
a += int64(elfwritehdr())
a += int64(elfwritephdrs())
a += int64(elfwriteshdrs())
var nimport int
func hashstr(name string) int {
- var h uint32
- var cp string
-
- h = 0
- for cp = name; cp != ""; cp = cp[1:] {
+ h := uint32(0)
+ for cp := name; cp != ""; cp = cp[1:] {
h = h*1119 + uint32(cp[0])
}
h &= 0xffffff
}
func ilookup(name string) *Import {
- var h int
- var x *Import
-
- h = hashstr(name) % NIHASH
- for x = ihash[h]; x != nil; x = x.hash {
+ h := hashstr(name) % NIHASH
+ for x := ihash[h]; x != nil; x = x.hash {
if x.name[0] == name[0] && x.name == name {
return x
}
}
- x = new(Import)
+ x := new(Import)
x.name = name
x.hash = ihash[h]
ihash[h] = x
}
func ldpkg(f *Biobuf, pkg string, length int64, filename string, whence int) {
- var bdata []byte
- var data string
var p0, p1 int
- var name string
if Debug['g'] != 0 {
return
return
}
- bdata = make([]byte, length)
+ bdata := make([]byte, length)
if int64(Bread(f, bdata)) != length {
fmt.Fprintf(os.Stderr, "%s: short pkg read %s\n", os.Args[0], filename)
if Debug['u'] != 0 {
}
return
}
- data = string(bdata)
+ data := string(bdata)
// first \n$$ marks beginning of exports - skip rest of line
p0 = strings.Index(data, "\n$$")
for p0 < p1 && (data[p0] == ' ' || data[p0] == '\t' || data[p0] == '\n') {
p0++
}
- name = data[p0:]
+ name := data[p0:]
for p0 < p1 && data[p0] != ' ' && data[p0] != '\t' && data[p0] != '\n' {
p0++
}
}
func loadpkgdata(file string, pkg string, data string) {
- var p string
var prefix string
var name string
var def string
var x *Import
file = file
- p = data
+ p := data
for parsepkgdata(file, pkg, &p, &prefix, &name, &def) > 0 {
x = ilookup(name)
if x.prefix == "" {
}
func parsepkgdata(file string, pkg string, pp *string, prefixp *string, namep *string, defp *string) int {
- var p string
var prefix string
- var name string
- var def string
- var meth string
- var inquote bool
// skip white space
- p = *pp
+ p := *pp
loop:
for len(p) > 0 && (p[0] == ' ' || p[0] == '\t' || p[0] == '\n') {
prefix = prefix[:len(prefix)-len(p)-1]
// name: a.b followed by space
- name = p
+ name := p
- inquote = false
+ inquote := false
for len(p) > 0 {
if p[0] == ' ' && !inquote {
break
p = p[1:]
// def: free form to new line
- def = p
+ def := p
for len(p) > 0 && p[0] != '\n' {
p = p[1:]
p = p[1:]
// include methods on successive lines in def of named type
+ var meth string
for parsemethod(&p, &meth) > 0 {
if defbuf == nil {
defbuf = new(bytes.Buffer)
}
func parsemethod(pp *string, methp *string) int {
- var p string
-
// skip white space
- p = *pp
+ p := *pp
for len(p) > 0 && (p[0] == ' ' || p[0] == '\t') {
p = p[1:]
func loadcgo(file string, pkg string, p string) {
var next string
- var p0 string
var q string
var f []string
var local string
var lib string
var s *LSym
- p0 = ""
+ p0 := ""
for ; p != ""; p = next {
if i := strings.Index(p, "\n"); i >= 0 {
p, next = p[:i], p[i+1:]
func markflood() {
var a *Auto
- var s *LSym
var i int
- for s = markq; s != nil; s = s.Queue {
+ for s := markq; s != nil; s = s.Queue {
if s.Type == STEXT {
if Debug['v'] > 1 {
fmt.Fprintf(&Bso, "marktext %s\n", s.Name)
}
func deadcode() {
- var i int
- var s *LSym
- var last *LSym
- var p *LSym
- var fmt_ string
-
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f deadcode\n", obj.Cputime())
}
mark(Linklookup(Ctxt, INITENTRY, 0))
- for i = 0; i < len(markextra); i++ {
+ for i := 0; i < len(markextra); i++ {
mark(Linklookup(Ctxt, markextra[i], 0))
}
- for i = 0; i < len(dynexp); i++ {
+ for i := 0; i < len(dynexp); i++ {
mark(dynexp[i])
}
markflood()
// keep each beginning with 'typelink.' if the symbol it points at is being kept.
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if strings.HasPrefix(s.Name, "go.typelink.") {
s.Reachable = len(s.R) == 1 && s.R[0].Sym.Reachable
}
}
// remove dead text but keep file information (z symbols).
- last = nil
+ last := (*LSym)(nil)
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
if !s.Reachable {
continue
}
last.Next = nil
}
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if strings.HasPrefix(s.Name, "go.weak.") {
s.Special = 1 // do not lay out in data segment
s.Reachable = true
}
// record field tracking references
- fmt_ = ""
+ fmt_ := ""
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ var p *LSym
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if strings.HasPrefix(s.Name, "go.track.") {
s.Special = 1 // do not lay out in data segment
s.Hide = 1
if tracksym == "" {
return
}
- s = Linklookup(Ctxt, tracksym, 0)
+ s := Linklookup(Ctxt, tracksym, 0)
if !s.Reachable {
return
}
}
func doweak() {
- var s *LSym
var t *LSym
// resolve weak references only if
// target symbol will be in binary anyway.
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if strings.HasPrefix(s.Name, "go.weak.") {
t = Linkrlookup(Ctxt, s.Name[8:], int(s.Version))
if t != nil && t.Type != 0 && t.Reachable {
}
func addexport() {
- var i int
-
if HEADTYPE == Hdarwin {
return
}
- for i = 0; i < len(dynexp); i++ {
+ for i := 0; i < len(dynexp); i++ {
Thearch.Adddynsym(Ctxt, dynexp[i])
}
}
var pkgall *Pkg
func getpkg(path_ string) *Pkg {
- var p *Pkg
- var h int
-
- h = hashstr(path_) % len(phash)
- for p = phash[h]; p != nil; p = p.next {
+ h := hashstr(path_) % len(phash)
+ for p := phash[h]; p != nil; p = p.next {
if p.path_ == path_ {
return p
}
}
- p = new(Pkg)
+ p := new(Pkg)
p.path_ = path_
p.next = phash[h]
phash[h] = p
}
func imported(pkg string, import_ string) {
- var p *Pkg
- var i *Pkg
-
// everyone imports runtime, even runtime.
if import_ == "\"runtime\"" {
return
}
pkg = fmt.Sprintf("\"%v\"", Zconv(pkg, 0)) // turn pkg path into quoted form, freed below
- p = getpkg(pkg)
- i = getpkg(import_)
+ p := getpkg(pkg)
+ i := getpkg(import_)
i.impby = append(i.impby, p)
}
func cycle(p *Pkg) *Pkg {
- var i int
- var bad *Pkg
-
if p.checked != 0 {
return nil
}
}
p.mark = 1
- for i = 0; i < len(p.impby); i++ {
+ var bad *Pkg
+ for i := 0; i < len(p.impby); i++ {
bad = cycle(p.impby[i])
if bad != nil {
p.mark = 0
}
func importcycles() {
- var p *Pkg
-
- for p = pkgall; p != nil; p = p.all {
+ for p := pkgall; p != nil; p = p.all {
cycle(p)
}
}
* pkg: package import path, e.g. container/vector
*/
func addlibpath(ctxt *Link, srcref string, objref string, file string, pkg string) {
- var i int
- var l *Library
-
- for i = 0; i < len(ctxt.Library); i++ {
+ for i := 0; i < len(ctxt.Library); i++ {
if file == ctxt.Library[i].File {
return
}
}
ctxt.Library = append(ctxt.Library, Library{})
- l = &ctxt.Library[len(ctxt.Library)-1]
+ l := &ctxt.Library[len(ctxt.Library)-1]
l.Objref = objref
l.Srcref = srcref
l.File = file
}
func ldelf(f *Biobuf, pkg string, length int64, pn string) {
- var err error
- var base int32
+ symbols := []*LSym(nil)
+
+ if Debug['v'] != 0 {
+ fmt.Fprintf(&Bso, "%5.2f ldelf %s\n", obj.Cputime(), pn)
+ }
+
+ Ctxt.Version++
+ base := int32(Boffset(f))
+
var add uint64
+ var e binary.ByteOrder
+ var elfobj *ElfObj
+ var err error
+ var flag int
+ var hdr *ElfHdrBytes
+ var hdrbuf [64]uint8
var info uint64
- var name string
- var i int
- var j int
- var rela int
var is64 int
+ var j int
var n int
- var flag int
- var hdrbuf [64]uint8
+ var name string
var p []byte
- var hdr *ElfHdrBytes
- var elfobj *ElfObj
- var sect *ElfSect
- var rsect *ElfSect
- var sym ElfSym
- var e binary.ByteOrder
var r []Reloc
+ var rela int
var rp *Reloc
+ var rsect *ElfSect
var s *LSym
- var symbols []*LSym
-
- symbols = nil
-
- if Debug['v'] != 0 {
- fmt.Fprintf(&Bso, "%5.2f ldelf %s\n", obj.Cputime(), pn)
- }
-
- Ctxt.Version++
- base = int32(Boffset(f))
-
+ var sect *ElfSect
+ var sym ElfSym
if Bread(f, hdrbuf[:]) != len(hdrbuf) {
goto bad
}
is64 = 0
if hdr.Ident[4] == ElfClass64 {
- var hdr *ElfHdrBytes64
-
is64 = 1
- hdr = new(ElfHdrBytes64)
+ hdr := new(ElfHdrBytes64)
binary.Read(bytes.NewReader(hdrbuf[:]), binary.BigEndian, hdr) // only byte arrays; byte order doesn't matter
elfobj.type_ = uint32(e.Uint16(hdr.Type[:]))
elfobj.machine = uint32(e.Uint16(hdr.Machine[:]))
elfobj.sect = make([]ElfSect, elfobj.shnum)
elfobj.nsect = uint(elfobj.shnum)
- for i = 0; uint(i) < elfobj.nsect; i++ {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
if Bseek(f, int64(uint64(base)+elfobj.shoff+uint64(int64(i)*int64(elfobj.shentsize))), 0) < 0 {
goto bad
}
if err = elfmap(elfobj, sect); err != nil {
goto bad
}
- for i = 0; uint(i) < elfobj.nsect; i++ {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
if elfobj.sect[i].nameoff != 0 {
elfobj.sect[i].name = cstring(sect.base[elfobj.sect[i].nameoff:])
}
// as well use one large chunk.
// create symbols for elfmapped sections
- for i = 0; uint(i) < elfobj.nsect; i++ {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
sect = &elfobj.sect[i]
if (sect.type_ != ElfSectProgbits && sect.type_ != ElfSectNobits) || sect.flags&ElfSectFlagAlloc == 0 {
continue
Errorexit()
}
- for i = 1; i < elfobj.nsymtab; i++ {
+ for i := 1; i < elfobj.nsymtab; i++ {
if err = readelfsym(elfobj, i, &sym, 1); err != nil {
goto bad
}
// Sort outer lists by address, adding to textp.
// This keeps textp in increasing address order.
- for i = 0; uint(i) < elfobj.nsect; i++ {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
s = elfobj.sect[i].sym
if s == nil {
continue
}
// load relocations
- for i = 0; uint(i) < elfobj.nsect; i++ {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
rsect = &elfobj.sect[i]
if rsect.type_ != ElfSectRela && rsect.type_ != ElfSectRel {
continue
}
func section(elfobj *ElfObj, name string) *ElfSect {
- var i int
-
- for i = 0; uint(i) < elfobj.nsect; i++ {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
if elfobj.sect[i].name != "" && name != "" && elfobj.sect[i].name == name {
return &elfobj.sect[i]
}
}
func readelfsym(elfobj *ElfObj, i int, sym *ElfSym, needSym int) (err error) {
- var s *LSym
-
if i >= elfobj.nsymtab || i < 0 {
err = fmt.Errorf("invalid elf symbol index")
return err
sym.other = b.Other
}
- s = nil
+ s := (*LSym)(nil)
if sym.name == "_GLOBAL_OFFSET_TABLE_" {
sym.name = ".got"
}
}
func (x rbyoff) Less(i, j int) bool {
- var a *Reloc
- var b *Reloc
-
- a = &x[i]
- b = &x[j]
+ a := &x[i]
+ b := &x[j]
if a.Off < b.Off {
return true
}
)
func unpackcmd(p []byte, m *LdMachoObj, c *LdMachoCmd, type_ uint, sz uint) int {
- var e4 func([]byte) uint32
- var e8 func([]byte) uint64
- var s *LdMachoSect
- var i int
-
- e4 = m.e.Uint32
- e8 = m.e.Uint64
+ e4 := m.e.Uint32
+ e8 := m.e.Uint64
c.type_ = int(type_)
c.size = uint32(sz)
return -1
}
p = p[56:]
- for i = 0; uint32(i) < c.seg.nsect; i++ {
+ var s *LdMachoSect
+ for i := 0; uint32(i) < c.seg.nsect; i++ {
s = &c.seg.sect[i]
s.name = cstring(p[0:16])
s.segname = cstring(p[16:32])
return -1
}
p = p[72:]
- for i = 0; uint32(i) < c.seg.nsect; i++ {
+ var s *LdMachoSect
+ for i := 0; uint32(i) < c.seg.nsect; i++ {
s = &c.seg.sect[i]
s.name = cstring(p[0:16])
s.segname = cstring(p[16:32])
}
func macholoadrel(m *LdMachoObj, sect *LdMachoSect) int {
- var rel []LdMachoRel
- var r *LdMachoRel
- var buf []byte
- var p []byte
- var i int
- var n int
- var v uint32
-
if sect.rel != nil || sect.nreloc == 0 {
return 0
}
- rel = make([]LdMachoRel, sect.nreloc)
- n = int(sect.nreloc * 8)
- buf = make([]byte, n)
+ rel := make([]LdMachoRel, sect.nreloc)
+ n := int(sect.nreloc * 8)
+ buf := make([]byte, n)
if Bseek(m.f, m.base+int64(sect.reloff), 0) < 0 || Bread(m.f, buf) != n {
return -1
}
- for i = 0; uint32(i) < sect.nreloc; i++ {
+ var p []byte
+ var r *LdMachoRel
+ var v uint32
+ for i := 0; uint32(i) < sect.nreloc; i++ {
r = &rel[i]
p = buf[i*8:]
r.addr = m.e.Uint32(p)
}
func macholoaddsym(m *LdMachoObj, d *LdMachoDysymtab) int {
- var p []byte
- var i int
- var n int
-
- n = int(d.nindirectsyms)
+ n := int(d.nindirectsyms)
- p = make([]byte, n*4)
+ p := make([]byte, n*4)
if Bseek(m.f, m.base+int64(d.indirectsymoff), 0) < 0 || Bread(m.f, p) != len(p) {
return -1
}
d.indir = make([]uint32, n)
- for i = 0; i < n; i++ {
+ for i := 0; i < n; i++ {
d.indir[i] = m.e.Uint32(p[4*i:])
}
return 0
}
func macholoadsym(m *LdMachoObj, symtab *LdMachoSymtab) int {
- var strbuf []byte
- var symbuf []byte
- var p []byte
- var i int
- var n int
- var symsize int
- var sym []LdMachoSym
- var s *LdMachoSym
- var v uint32
-
if symtab.sym != nil {
return 0
}
- strbuf = make([]byte, symtab.strsize)
+ strbuf := make([]byte, symtab.strsize)
if Bseek(m.f, m.base+int64(symtab.stroff), 0) < 0 || Bread(m.f, strbuf) != len(strbuf) {
return -1
}
- symsize = 12
+ symsize := 12
if m.is64 {
symsize = 16
}
- n = int(symtab.nsym * uint32(symsize))
- symbuf = make([]byte, n)
+ n := int(symtab.nsym * uint32(symsize))
+ symbuf := make([]byte, n)
if Bseek(m.f, m.base+int64(symtab.symoff), 0) < 0 || Bread(m.f, symbuf) != len(symbuf) {
return -1
}
- sym = make([]LdMachoSym, symtab.nsym)
- p = symbuf
- for i = 0; uint32(i) < symtab.nsym; i++ {
+ sym := make([]LdMachoSym, symtab.nsym)
+ p := symbuf
+ var s *LdMachoSym
+ var v uint32
+ for i := 0; uint32(i) < symtab.nsym; i++ {
s = &sym[i]
v = m.e.Uint32(p)
if v >= symtab.strsize {
func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
var err error
- var i int
var j int
var is64 bool
var secaddr uint64
var hdr [7 * 4]uint8
var cmdp []byte
- var tmp [4]uint8
var dat []byte
var ncmd uint32
var cmdsz uint32
var off uint32
var m *LdMachoObj
var e binary.ByteOrder
- var base int64
var sect *LdMachoSect
var rel *LdMachoRel
var rpi int
var name string
Ctxt.Version++
- base = Boffset(f)
+ base := Boffset(f)
if Bread(f, hdr[:]) != len(hdr) {
goto bad
}
}
if is64 {
+ var tmp [4]uint8
Bread(f, tmp[:4]) // skip reserved word in header
}
symtab = nil
dsymtab = nil
- for i = 0; uint32(i) < ncmd; i++ {
+ for i := 0; uint32(i) < ncmd; i++ {
ty = e.Uint32(cmdp)
sz = e.Uint32(cmdp[4:])
m.cmd[i].off = off
goto bad
}
- for i = 0; uint32(i) < c.seg.nsect; i++ {
+ for i := 0; uint32(i) < c.seg.nsect; i++ {
sect = &c.seg.sect[i]
if sect.segname != "__TEXT" && sect.segname != "__DATA" {
continue
// enter sub-symbols into symbol table.
// have to guess sizes from next symbol.
- for i = 0; uint32(i) < symtab.nsym; i++ {
- var v int
+ for i := 0; uint32(i) < symtab.nsym; i++ {
sym = &symtab.sym[i]
if sym.type_&N_STAB != 0 {
continue
if name[0] == '_' && name[1] != '\x00' {
name = name[1:]
}
- v = 0
+ v := 0
if sym.type_&N_EXT == 0 {
v = Ctxt.Version
}
// Sort outer lists by address, adding to textp.
// This keeps textp in increasing address order.
- for i = 0; uint32(i) < c.seg.nsect; i++ {
+ for i := 0; uint32(i) < c.seg.nsect; i++ {
sect = &c.seg.sect[i]
s = sect.sym
if s == nil {
}
// load relocations
- for i = 0; uint32(i) < c.seg.nsect; i++ {
+ for i := 0; uint32(i) < c.seg.nsect; i++ {
sect = &c.seg.sect[i]
s = sect.sym
if s == nil {
rp = &r[rpi]
rel = &sect.rel[j]
if rel.scattered != 0 {
- var k int
- var ks *LdMachoSect
-
if Thearch.Thechar != '8' {
// mach-o only uses scattered relocation on 32-bit platforms
Diag("unexpected scattered relocation")
// now consider the desired symbol.
// find the section where it lives.
- for k = 0; uint32(k) < c.seg.nsect; k++ {
+ var ks *LdMachoSect
+ for k := 0; uint32(k) < c.seg.nsect; k++ {
ks = &c.seg.sect[k]
if ks.addr <= uint64(rel.value) && uint64(rel.value) < ks.addr+ks.size {
- goto foundk
+ if ks.sym != nil {
+ rp.Sym = ks.sym
+ rp.Add += int64(uint64(rel.value) - ks.addr)
+ } else if ks.segname == "__IMPORT" && ks.name == "__pointers" {
+ // handle reference to __IMPORT/__pointers.
+ // how much worse can this get?
+ // why are we supporting 386 on the mac anyway?
+ rp.Type = 512 + MACHO_FAKE_GOTPCREL
+
+ // figure out which pointer this is a reference to.
+ k = int(uint64(ks.res1) + (uint64(rel.value)-ks.addr)/4)
+
+ // load indirect table for __pointers
+ // fetch symbol number
+ if dsymtab == nil || k < 0 || uint32(k) >= dsymtab.nindirectsyms || dsymtab.indir == nil {
+ err = fmt.Errorf("invalid scattered relocation: indirect symbol reference out of range")
+ goto bad
+ }
+
+ k = int(dsymtab.indir[k])
+ if k < 0 || uint32(k) >= symtab.nsym {
+ err = fmt.Errorf("invalid scattered relocation: symbol reference out of range")
+ goto bad
+ }
+
+ rp.Sym = symtab.sym[k].sym
+ } else {
+ err = fmt.Errorf("unsupported scattered relocation: reference to %s/%s", ks.segname, ks.name)
+ goto bad
+ }
+
+ rpi++
+
+ // skip #1 of 2 rel; continue skips #2 of 2.
+ j++
+
+ continue
}
}
err = fmt.Errorf("unsupported scattered relocation: invalid address %#x", rel.addr)
goto bad
- foundk:
- if ks.sym != nil {
- rp.Sym = ks.sym
- rp.Add += int64(uint64(rel.value) - ks.addr)
- } else if ks.segname == "__IMPORT" && ks.name == "__pointers" {
- // handle reference to __IMPORT/__pointers.
- // how much worse can this get?
- // why are we supporting 386 on the mac anyway?
- rp.Type = 512 + MACHO_FAKE_GOTPCREL
-
- // figure out which pointer this is a reference to.
- k = int(uint64(ks.res1) + (uint64(rel.value)-ks.addr)/4)
-
- // load indirect table for __pointers
- // fetch symbol number
- if dsymtab == nil || k < 0 || uint32(k) >= dsymtab.nindirectsyms || dsymtab.indir == nil {
- err = fmt.Errorf("invalid scattered relocation: indirect symbol reference out of range")
- goto bad
- }
-
- k = int(dsymtab.indir[k])
- if k < 0 || uint32(k) >= symtab.nsym {
- err = fmt.Errorf("invalid scattered relocation: symbol reference out of range")
- goto bad
- }
-
- rp.Sym = symtab.sym[k].sym
- } else {
- err = fmt.Errorf("unsupported scattered relocation: reference to %s/%s", ks.segname, ks.name)
- goto bad
- }
-
- rpi++
-
- // skip #1 of 2 rel; continue skips #2 of 2.
- j++
-
- continue
}
rp.Siz = rel.length
}
func ldpe(f *Biobuf, pkg string, length int64, pn string) {
- var err error
- var name string
- var base int32
- var l uint32
- var i int
- var j int
- var numaux int
- var peobj *PeObj
- var sect *PeSect
- var rsect *PeSect
- var symbuf [18]uint8
- var s *LSym
- var r []Reloc
- var rp *Reloc
- var sym *PeSym
-
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f ldpe %s\n", obj.Cputime(), pn)
}
- sect = nil
+ sect := (*PeSect)(nil)
Ctxt.Version++
- base = int32(Boffset(f))
+ base := int32(Boffset(f))
- peobj = new(PeObj)
+ peobj := new(PeObj)
peobj.f = f
peobj.base = uint32(base)
peobj.name = pn
// read header
+ var err error
+ var j int
+ var l uint32
+ var name string
+ var numaux int
+ var r []Reloc
+ var rp *Reloc
+ var rsect *PeSect
+ var s *LSym
+ var sym *PeSym
+ var symbuf [18]uint8
if err = binary.Read(f, binary.LittleEndian, &peobj.fh); err != nil {
goto bad
}
peobj.sect = make([]PeSect, peobj.fh.NumberOfSections)
peobj.nsect = uint(peobj.fh.NumberOfSections)
- for i = 0; i < int(peobj.fh.NumberOfSections); i++ {
+ for i := 0; i < int(peobj.fh.NumberOfSections); i++ {
if err = binary.Read(f, binary.LittleEndian, &peobj.sect[i].sh); err != nil {
goto bad
}
}
// rewrite section names if they start with /
- for i = 0; i < int(peobj.fh.NumberOfSections); i++ {
+ for i := 0; i < int(peobj.fh.NumberOfSections); i++ {
if peobj.sect[i].name == "" {
continue
}
peobj.npesym = uint(peobj.fh.NumberOfSymbols)
Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable), 0)
- for i = 0; uint32(i) < peobj.fh.NumberOfSymbols; i += numaux + 1 {
+ for i := 0; uint32(i) < peobj.fh.NumberOfSymbols; i += numaux + 1 {
Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(i), 0)
if Bread(f, symbuf[:]) != len(symbuf) {
goto bad
}
// create symbols for mapped sections
- for i = 0; uint(i) < peobj.nsect; i++ {
+ for i := 0; uint(i) < peobj.nsect; i++ {
sect = &peobj.sect[i]
if sect.sh.Characteristics&IMAGE_SCN_MEM_DISCARDABLE != 0 {
continue
}
// load relocations
- for i = 0; uint(i) < peobj.nsect; i++ {
+ for i := 0; uint(i) < peobj.nsect; i++ {
rsect = &peobj.sect[i]
if rsect.sym == nil || rsect.sh.NumberOfRelocations == 0 {
continue
if Bread(f, symbuf[:10]) != 10 {
goto bad
}
- var rva uint32
- var symindex uint32
- var type_ uint16
- rva = Le32(symbuf[0:])
- symindex = Le32(symbuf[4:])
- type_ = Le16(symbuf[8:])
+ rva := Le32(symbuf[0:])
+ symindex := Le32(symbuf[4:])
+ type_ := Le16(symbuf[8:])
if err = readpesym(peobj, int(symindex), &sym); err != nil {
goto bad
}
}
// enter sub-symbols into symbol table.
- for i = 0; uint(i) < peobj.npesym; i++ {
+ for i := 0; uint(i) < peobj.npesym; i++ {
if peobj.pesym[i].name == "" {
continue
}
// Sort outer lists by address, adding to textp.
// This keeps textp in increasing address order.
- for i = 0; uint(i) < peobj.nsect; i++ {
+ for i := 0; uint(i) < peobj.nsect; i++ {
s = peobj.sect[i].sym
if s == nil {
continue
}
func readpesym(peobj *PeObj, i int, y **PeSym) (err error) {
- var s *LSym
- var sym *PeSym
- var name string
-
if uint(i) >= peobj.npesym || i < 0 {
err = fmt.Errorf("invalid pe symbol index")
return err
}
- sym = &peobj.pesym[i]
+ sym := &peobj.pesym[i]
*y = sym
+ var name string
if issect(sym) {
name = peobj.sect[sym.sectnum-1].sym.Name
} else {
name = name[:i]
}
+ var s *LSym
switch sym.type_ {
default:
err = fmt.Errorf("%s: invalid symbol type %d", sym.name, sym.type_)
}
func libinit() {
- var suffix string
- var suffixsep string
-
Funcalign = Thearch.Funcalign
mywhatsys() // get goroot, goarch, goos
// add goroot to the end of the libdir list.
- suffix = ""
+ suffix := ""
- suffixsep = ""
+ suffixsep := ""
if flag_installsuffix != "" {
suffixsep = "_"
suffix = flag_installsuffix
func loadinternal(name string) {
var pname string
- var i int
- var found int
- found = 0
- for i = 0; i < len(Ctxt.Libdir); i++ {
+ found := 0
+ for i := 0; i < len(Ctxt.Libdir); i++ {
pname = fmt.Sprintf("%s/%s.a", Ctxt.Libdir[i], name)
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "searching for %s.a in %s\n", name, pname)
}
func loadlib() {
- var i int
- var w int
- var x int
- var s *LSym
- var tlsg *LSym
- var cgostrsym string
-
if Flag_shared != 0 {
- s = Linklookup(Ctxt, "runtime.islibrary", 0)
+ s := Linklookup(Ctxt, "runtime.islibrary", 0)
s.Dupok = 1
Adduint8(Ctxt, s, 1)
}
loadinternal("runtime/race")
}
+ var i int
for i = 0; i < len(Ctxt.Library); i++ {
if Debug['v'] > 1 {
fmt.Fprintf(&Bso, "%5.2f autolib: %s (from %s)\n", obj.Cputime(), Ctxt.Library[i].File, Ctxt.Library[i].Objref)
}
// Pretend that we really imported the package.
- s = Linklookup(Ctxt, "go.importpath.runtime/cgo.", 0)
+ s := Linklookup(Ctxt, "go.importpath.runtime/cgo.", 0)
s.Type = SDATA
s.Dupok = 1
// Provided by the code that imports the package.
// Since we are simulating the import, we have to provide this string.
- cgostrsym = "go.string.\"runtime/cgo\""
+ cgostrsym := "go.string.\"runtime/cgo\""
if Linkrlookup(Ctxt, cgostrsym, 0) == nil {
- s = Linklookup(Ctxt, cgostrsym, 0)
+ s := Linklookup(Ctxt, cgostrsym, 0)
s.Type = SRODATA
s.Reachable = true
addstrdata(cgostrsym, "runtime/cgo")
if Linkmode == LinkInternal {
// Drop all the cgo_import_static declarations.
// Turns out we won't be needing them.
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if s.Type == SHOSTOBJ {
// If a symbol was marked both
// cgo_import_static and cgo_import_dynamic,
}
}
- tlsg = Linklookup(Ctxt, "runtime.tlsg", 0)
+ tlsg := Linklookup(Ctxt, "runtime.tlsg", 0)
// For most ports, runtime.tlsg is a placeholder symbol for TLS
// relocation. However, the Android and Darwin arm ports need it
Ctxt.Tlsg = tlsg
// Now that we know the link mode, trim the dynexp list.
- x = CgoExportDynamic
+ x := CgoExportDynamic
if Linkmode == LinkExternal {
x = CgoExportStatic
}
- w = 0
- for i = 0; i < len(dynexp); i++ {
+ w := 0
+ for i := 0; i < len(dynexp); i++ {
if int(dynexp[i].Cgoexport)&x != 0 {
dynexp[w] = dynexp[i]
w++
}
func objfile(file string, pkg string) {
- var off int64
- var l int64
- var f *Biobuf
- var pname string
- var arhdr ArHdr
-
pkg = pathtoprefix(pkg)
if Debug['v'] > 1 {
}
Bflush(&Bso)
var err error
+ var f *Biobuf
f, err = Bopenr(file)
if err != nil {
Diag("cannot open file %s: %v", file, err)
magbuf := make([]byte, len(ARMAG))
if Bread(f, magbuf) != len(magbuf) || !strings.HasPrefix(string(magbuf), ARMAG) {
/* load it as a regular file */
- l = Bseek(f, 0, 2)
+ l := Bseek(f, 0, 2)
Bseek(f, 0, 0)
ldobj(f, pkg, l, file, file, FileObj)
}
/* skip over optional __.GOSYMDEF and process __.PKGDEF */
- off = Boffset(f)
+ off := Boffset(f)
- l = nextar(f, off, &arhdr)
+ var arhdr ArHdr
+ l := nextar(f, off, &arhdr)
+ var pname string
if l <= 0 {
Diag("%s: short read on archive file symbol header", file)
goto out
}
func ldhostobj(ld func(*Biobuf, string, int64, string), f *Biobuf, pkg string, length int64, pn string, file string) {
- var i int
- var isinternal int
- var h *Hostobj
-
- isinternal = 0
- for i = 0; i < len(internalpkg); i++ {
+ isinternal := 0
+ for i := 0; i < len(internalpkg); i++ {
if pkg == internalpkg[i] {
isinternal = 1
break
}
hostobj = append(hostobj, Hostobj{})
- h = &hostobj[len(hostobj)-1]
+ h := &hostobj[len(hostobj)-1]
h.ld = ld
h.pkg = pkg
h.pn = pn
}
func hostobjs() {
- var i int
var f *Biobuf
var h *Hostobj
- for i = 0; i < len(hostobj); i++ {
+ for i := 0; i < len(hostobj); i++ {
h = &hostobj[i]
var err error
f, err = Bopenr(h.file)
}
func hostlinksetup() {
- var p string
-
if Linkmode != LinkExternal {
return
}
// change our output to temporary object file
cout.Close()
- p = fmt.Sprintf("%s/go.o", tmpdir)
+ p := fmt.Sprintf("%s/go.o", tmpdir)
var err error
cout, err = os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0775)
if err != nil {
var hostlink_buf = make([]byte, 64*1024)
func hostlink() {
- var p string
- var argv []string
- var i int
- var n int
- var length int
- var h *Hostobj
- var f *Biobuf
-
if Linkmode != LinkExternal || nerrors > 0 {
return
}
if extld == "" {
extld = "gcc"
}
+ var argv []string
argv = append(argv, extld)
switch Thearch.Thechar {
case '8':
// already wrote main object file
// copy host objects to temporary directory
- for i = 0; i < len(hostobj); i++ {
+ var f *Biobuf
+ var h *Hostobj
+ var length int
+ var n int
+ var p string
+ for i := 0; i < len(hostobj); i++ {
h = &hostobj[i]
var err error
f, err = Bopenr(h.file)
length -= n
}
- if err = w.Close(); err != nil {
+ if err := w.Close(); err != nil {
Ctxt.Cursym = nil
Diag("cannot write %s: %v", p, err)
Errorexit()
}
argv = append(argv, fmt.Sprintf("%s/go.o", tmpdir))
+ var i int
for i = 0; i < len(ldflag); i++ {
argv = append(argv, ldflag[i])
}
}
func ldobj(f *Biobuf, pkg string, length int64, pn string, file string, whence int) {
- var line string
- var c1 int
- var c2 int
- var c3 int
- var c4 int
- var magic uint32
- var import0 int64
- var import1 int64
- var eof int64
- var start int64
- var t string
-
- eof = Boffset(f) + length
+ eof := Boffset(f) + length
pn = pn
- start = Boffset(f)
- c1 = Bgetc(f)
- c2 = Bgetc(f)
- c3 = Bgetc(f)
- c4 = Bgetc(f)
+ start := Boffset(f)
+ c1 := Bgetc(f)
+ c2 := Bgetc(f)
+ c3 := Bgetc(f)
+ c4 := Bgetc(f)
Bseek(f, start, 0)
- magic = uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4)
+ magic := uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4)
if magic == 0x7f454c46 { // \x7F E L F
ldhostobj(ldelf, f, pkg, length, pn, file)
return
}
/* check the header */
- line = Brdline(f, '\n')
+ line := Brdline(f, '\n')
+ var import0 int64
+ var import1 int64
+ var t string
if line == "" {
if Blinelen(f) > 0 {
Diag("%s: not an object file", pn)
}
func zerosig(sp string) {
- var s *LSym
-
- s = Linklookup(Ctxt, sp, 0)
+ s := Linklookup(Ctxt, sp, 0)
s.Sig = 0
}
for i := 0; i < len(s); i++ {
c := s[i]
if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
- goto escape
+ var buf bytes.Buffer
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+ fmt.Fprintf(&buf, "%%%02x", c)
+ continue
+ }
+ buf.WriteByte(c)
+ }
+ return buf.String()
}
}
return s
-
-escape:
- var buf bytes.Buffer
- for i := 0; i < len(s); i++ {
- c := s[i]
- if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
- fmt.Fprintf(&buf, "%%%02x", c)
- continue
- }
- buf.WriteByte(c)
- }
- return buf.String()
}
func iconv(p string) string {
- var fp string
-
if p == "" {
+ var fp string
fp += "<nil>"
return fp
}
p = pathtoprefix(p)
+ var fp string
fp += p
return fp
}
func addsection(seg *Segment, name string, rwx int) *Section {
var l **Section
- var sect *Section
for l = &seg.Sect; *l != nil; l = &(*l).Next {
}
- sect = new(Section)
+ sect := new(Section)
sect.Rwx = uint8(rwx)
sect.Name = name
sect.Seg = seg
func dostkcheck() {
var ch Chain
- var s *LSym
morestack = Linklookup(Ctxt, "runtime.morestack", 0)
newstack = Linklookup(Ctxt, "runtime.newstack", 0)
// Check every function, but do the nosplit functions in a first pass,
// to make the printed failure chains as short as possible.
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
// runtime.racesymbolizethunk is called from gcc-compiled C
// code running on the operating system thread stack.
// It uses more than the usual amount of stack but that's okay.
}
}
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
if s.Nosplit == 0 {
Ctxt.Cursym = s
ch.sym = s
}
func stkcheck(up *Chain, depth int) int {
- var ch Chain
- var ch1 Chain
- var s *LSym
- var limit int
- var r *Reloc
- var ri int
- var endr int
- var pcsp Pciter
-
- limit = up.limit
- s = up.sym
+ limit := up.limit
+ s := up.sym
// Don't duplicate work: only need to consider each
// function at top of safe zone once.
return 0
}
+ var ch Chain
ch.up = up
// Walk through sp adjustments in function, consuming relocs.
- ri = 0
+ ri := 0
- endr = len(s.R)
+ endr := len(s.R)
+ var ch1 Chain
+ var pcsp Pciter
+ var r *Reloc
for pciterinit(Ctxt, &pcsp, &s.Pcln.Pcsp); pcsp.done == 0; pciternext(&pcsp) {
// pcsp.value is in effect for [pcsp.pc, pcsp.nextpc).
func Yconv(s *LSym) string {
var fp string
- var fmt_ string
- var i int
- var str string
-
if s == nil {
fp += fmt.Sprintf("<nil>")
} else {
- fmt_ = ""
+ fmt_ := ""
fmt_ += fmt.Sprintf("%s @0x%08x [%d]", s.Name, int64(s.Value), int64(s.Size))
- for i = 0; int64(i) < s.Size; i++ {
+ for i := 0; int64(i) < s.Size; i++ {
if i%8 == 0 {
fmt_ += fmt.Sprintf("\n\t0x%04x ", i)
}
}
fmt_ += fmt.Sprintf("\n")
- for i = 0; i < len(s.R); i++ {
+ for i := 0; i < len(s.R); i++ {
fmt_ += fmt.Sprintf("\t0x%04x[%x] %d %s[%x]\n", s.R[i].Off, s.R[i].Siz, s.R[i].Type, s.R[i].Sym.Name, int64(s.R[i].Add))
}
- str = fmt_
+ str := fmt_
fp += str
}
}
func setheadtype(s string) {
- var h int
-
- h = headtype(s)
+ h := headtype(s)
if h < 0 {
fmt.Fprintf(os.Stderr, "unknown header type -H %s\n", s)
Errorexit()
}
func genasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) {
- var a *Auto
- var s *LSym
- var off int32
-
// These symbols won't show up in the first loop below because we
// skip STEXT symbols. Normal STEXT symbols are emitted by walking textp.
- s = Linklookup(Ctxt, "runtime.text", 0)
+ s := Linklookup(Ctxt, "runtime.text", 0)
if s.Type == STEXT {
put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), nil)
put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), nil)
}
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if s.Hide != 0 || (s.Name[0] == '.' && s.Version == 0 && s.Name != ".rathole") {
continue
}
}
}
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ var a *Auto
+ var off int32
+ for s := Ctxt.Textp; s != nil; s = s.Next {
put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), s.Gotype)
// NOTE(ality): acid can't produce a stack trace without .frame symbols
}
func xdefine(p string, t int, v int64) {
- var s *LSym
-
- s = Linklookup(Ctxt, p, 0)
+ s := Linklookup(Ctxt, p, 0)
s.Type = int16(t)
s.Value = v
s.Reachable = true
}
func Entryvalue() int64 {
- var a string
- var s *LSym
-
- a = INITENTRY
+ a := INITENTRY
if a[0] >= '0' && a[0] <= '9' {
return atolwhex(a)
}
- s = Linklookup(Ctxt, a, 0)
+ s := Linklookup(Ctxt, a, 0)
if s.Type == 0 {
return INITTEXT
}
}
func undefsym(s *LSym) {
- var i int
var r *Reloc
Ctxt.Cursym = s
- for i = 0; i < len(s.R); i++ {
+ for i := 0; i < len(s.R); i++ {
r = &s.R[i]
if r.Sym == nil { // happens for some external ARM relocs
continue
}
func undef() {
- var s *LSym
-
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
undefsym(s)
}
- for s = datap; s != nil; s = s.Next {
+ for s := datap; s != nil; s = s.Next {
undefsym(s)
}
if nerrors > 0 {
}
func callgraph() {
- var s *LSym
- var r *Reloc
- var i int
-
if Debug['c'] == 0 {
return
}
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ var i int
+ var r *Reloc
+ for s := Ctxt.Textp; s != nil; s = s.Next {
for i = 0; i < len(s.R); i++ {
r = &s.R[i]
if r.Sym == nil {
}
func checkgo() {
- var s *LSym
- var r *Reloc
- var i int
- var changed int
-
if Debug['C'] == 0 {
return
}
// which would simplify this logic quite a bit.
// Mark every Go-called C function with cfunc=2, recursively.
+ var changed int
+ var i int
+ var r *Reloc
+ var s *LSym
for {
changed = 0
for s = Ctxt.Textp; s != nil; s = s.Next {
// Complain about Go-called C functions that can split the stack
// (that can be preempted for garbage collection or trigger a stack copy).
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
if s.Cfunc == 0 || (s.Cfunc == 2 && s.Nosplit != 0) {
for i = 0; i < len(s.R); i++ {
r = &s.R[i]
}
func Rnd(v int64, r int64) int64 {
- var c int64
-
if r <= 0 {
return v
}
v += r - 1
- c = v % r
+ c := v % r
if c < 0 {
c += r
}
}
func newMachoSeg(name string, msect int) *MachoSeg {
- var s *MachoSeg
-
if nseg >= len(seg) {
Diag("too many segs")
Errorexit()
}
- s = &seg[nseg]
+ s := &seg[nseg]
nseg++
s.name = name
s.msect = uint32(msect)
}
func newMachoSect(seg *MachoSeg, name string, segname string) *MachoSect {
- var s *MachoSect
-
if seg.nsect >= seg.msect {
Diag("too many sects in segment %s", seg.name)
Errorexit()
}
- s = &seg.sect[seg.nsect]
+ s := &seg.sect[seg.nsect]
seg.nsect++
s.name = name
s.segname = segname
var linkoff int64
func machowrite() int {
- var o1 int64
- var loadsize int
- var i int
- var j int
- var s *MachoSeg
- var t *MachoSect
- var l *MachoLoad
-
- o1 = Cpos()
+ o1 := Cpos()
- loadsize = 4 * 4 * ndebug
- for i = 0; i < len(load); i++ {
+ loadsize := 4 * 4 * ndebug
+ for i := 0; i < len(load); i++ {
loadsize += 4 * (len(load[i].data) + 2)
}
if macho64 {
Thearch.Lput(0) /* reserved */
}
- for i = 0; i < nseg; i++ {
+ var j int
+ var s *MachoSeg
+ var t *MachoSect
+ for i := 0; i < nseg; i++ {
s = &seg[i]
if macho64 {
Thearch.Lput(25) /* segment 64 */
}
}
- for i = 0; i < len(load); i++ {
+ var l *MachoLoad
+ for i := 0; i < len(load); i++ {
l = &load[i]
Thearch.Lput(l.type_)
Thearch.Lput(4 * (uint32(len(l.data)) + 2))
}
func domacho() {
- var s *LSym
-
if Debug['d'] != 0 {
return
}
// empirically, string table must begin with " \x00".
- s = Linklookup(Ctxt, ".machosymstr", 0)
+ s := Linklookup(Ctxt, ".machosymstr", 0)
s.Type = SMACHOSYMSTR
s.Reachable = true
s.Reachable = true
if Linkmode != LinkExternal {
- s = Linklookup(Ctxt, ".plt", 0) // will be __symbol_stub
+ s := Linklookup(Ctxt, ".plt", 0) // will be __symbol_stub
s.Type = SMACHOPLT
s.Reachable = true
}
func machoshbits(mseg *MachoSeg, sect *Section, segname string) {
- var msect *MachoSect
- var buf string
+ buf := "__" + strings.Replace(sect.Name[1:], ".", "_", -1)
- buf = "__" + strings.Replace(sect.Name[1:], ".", "_", -1)
-
- msect = newMachoSect(mseg, buf, segname)
+ msect := newMachoSect(mseg, buf, segname)
if sect.Rellen > 0 {
msect.reloc = uint32(sect.Reloff)
msect.nreloc = uint32(sect.Rellen / 8)
}
func Asmbmacho() {
- var v int64
- var w int64
- var va int64
- var a int
- var i int
- var mh *MachoHdr
- var ms *MachoSeg
- var ml *MachoLoad
- var sect *Section
-
/* apple MACH */
- va = INITTEXT - int64(HEADR)
+ va := INITTEXT - int64(HEADR)
- mh = getMachoHdr()
+ mh := getMachoHdr()
switch Thearch.Thechar {
default:
Diag("unknown mach architecture")
mh.subcpu = MACHO_SUBCPU_X86
}
- ms = nil
+ ms := (*MachoSeg)(nil)
if Linkmode == LinkExternal {
/* segment for entire file */
ms = newMachoSeg("", 40)
}
/* text */
- v = Rnd(int64(uint64(HEADR)+Segtext.Length), int64(INITRND))
+ v := Rnd(int64(uint64(HEADR)+Segtext.Length), int64(INITRND))
if Linkmode != LinkExternal {
ms = newMachoSeg("__TEXT", 20)
ms.prot2 = 5
}
- for sect = Segtext.Sect; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
machoshbits(ms, sect, "__TEXT")
}
/* data */
if Linkmode != LinkExternal {
- w = int64(Segdata.Length)
+ w := int64(Segdata.Length)
ms = newMachoSeg("__DATA", 20)
ms.vaddr = uint64(va) + uint64(v)
ms.vsize = uint64(w)
ms.prot2 = 3
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
machoshbits(ms, sect, "__DATA")
}
fallthrough
case '5':
- ml = newMachoLoad(5, 17+2) /* unix thread */
+ ml := newMachoLoad(5, 17+2) /* unix thread */
ml.data[0] = 1 /* thread type */
ml.data[1] = 17 /* word count */
ml.data[2+15] = uint32(Entryvalue()) /* start pc */
case '6':
- ml = newMachoLoad(5, 42+2) /* unix thread */
+ ml := newMachoLoad(5, 42+2) /* unix thread */
ml.data[0] = 4 /* thread type */
ml.data[1] = 42 /* word count */
ml.data[2+32] = uint32(Entryvalue()) /* start pc */
ml.data[2+32+1] = uint32(Entryvalue() >> 16 >> 16) // hide >>32 for 8l
case '8':
- ml = newMachoLoad(5, 16+2) /* unix thread */
+ ml := newMachoLoad(5, 16+2) /* unix thread */
ml.data[0] = 1 /* thread type */
ml.data[1] = 16 /* word count */
ml.data[2+10] = uint32(Entryvalue()) /* start pc */
}
if Debug['d'] == 0 {
- var s1 *LSym
- var s2 *LSym
- var s3 *LSym
- var s4 *LSym
-
// must match domacholink below
- s1 = Linklookup(Ctxt, ".machosymtab", 0)
+ s1 := Linklookup(Ctxt, ".machosymtab", 0)
- s2 = Linklookup(Ctxt, ".linkedit.plt", 0)
- s3 = Linklookup(Ctxt, ".linkedit.got", 0)
- s4 = Linklookup(Ctxt, ".machosymstr", 0)
+ s2 := Linklookup(Ctxt, ".linkedit.plt", 0)
+ s3 := Linklookup(Ctxt, ".linkedit.got", 0)
+ s4 := Linklookup(Ctxt, ".machosymstr", 0)
if Linkmode != LinkExternal {
- ms = newMachoSeg("__LINKEDIT", 0)
+ ms := newMachoSeg("__LINKEDIT", 0)
ms.vaddr = uint64(va) + uint64(v) + uint64(Rnd(int64(Segdata.Length), int64(INITRND)))
ms.vsize = uint64(s1.Size) + uint64(s2.Size) + uint64(s3.Size) + uint64(s4.Size)
ms.fileoffset = uint64(linkoff)
ms.prot2 = 3
}
- ml = newMachoLoad(2, 4) /* LC_SYMTAB */
+ ml := newMachoLoad(2, 4) /* LC_SYMTAB */
ml.data[0] = uint32(linkoff) /* symoff */
ml.data[1] = uint32(nsortsym) /* nsyms */
ml.data[2] = uint32(linkoff + s1.Size + s2.Size + s3.Size) /* stroff */
machodysymtab()
if Linkmode != LinkExternal {
- ml = newMachoLoad(14, 6) /* LC_LOAD_DYLINKER */
- ml.data[0] = 12 /* offset to string */
+ ml := newMachoLoad(14, 6) /* LC_LOAD_DYLINKER */
+ ml.data[0] = 12 /* offset to string */
stringtouint32(ml.data[1:], "/usr/lib/dyld")
- for i = 0; i < len(dylib); i++ {
+ for i := 0; i < len(dylib); i++ {
ml = newMachoLoad(12, 4+(uint32(len(dylib[i]))+1+7)/8*2) /* LC_LOAD_DYLIB */
ml.data[0] = 24 /* offset of string from beginning of load */
ml.data[1] = 0 /* time stamp */
dwarfaddmachoheaders()
}
- a = machowrite()
+ a := machowrite()
if int32(a) > HEADR {
Diag("HEADR too small: %d > %d", a, HEADR)
}
}
func (x machoscmp) Less(i, j int) bool {
- var s1 *LSym
- var s2 *LSym
- var k1 int
- var k2 int
-
- s1 = x[i]
- s2 = x[j]
+ s1 := x[i]
+ s2 := x[j]
- k1 = symkind(s1)
- k2 = symkind(s2)
+ k1 := symkind(s1)
+ k2 := symkind(s2)
if k1 != k2 {
return k1-k2 < 0
}
}
func machogenasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) {
- var s *LSym
-
genasmsym(put)
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if s.Type == SDYNIMPORT || s.Type == SHOSTOBJ {
if s.Reachable {
put(s, "", 'D', 0, 0, 0, nil)
}
func machosymorder() {
- var i int
-
// On Mac OS X Mountain Lion, we must sort exported symbols
// So we sort them here and pre-allocate dynid for them
// See http://golang.org/issue/4029
- for i = 0; i < len(dynexp); i++ {
+ for i := 0; i < len(dynexp); i++ {
dynexp[i].Reachable = true
}
machogenasmsym(addsym)
nsortsym = 0
machogenasmsym(addsym)
sort.Sort(machoscmp(sortsym[:nsortsym]))
- for i = 0; i < nsortsym; i++ {
+ for i := 0; i < nsortsym; i++ {
sortsym[i].Dynid = int32(i)
}
}
func machosymtab() {
- var i int
- var symtab *LSym
- var symstr *LSym
var s *LSym
var o *LSym
var p string
- symtab = Linklookup(Ctxt, ".machosymtab", 0)
- symstr = Linklookup(Ctxt, ".machosymstr", 0)
+ symtab := Linklookup(Ctxt, ".machosymtab", 0)
+ symstr := Linklookup(Ctxt, ".machosymstr", 0)
- for i = 0; i < nsortsym; i++ {
+ for i := 0; i < nsortsym; i++ {
s = sortsym[i]
Adduint32(Ctxt, symtab, uint32(symstr.Size))
}
func machodysymtab() {
- var n int
- var ml *MachoLoad
- var s1 *LSym
- var s2 *LSym
- var s3 *LSym
-
- ml = newMachoLoad(11, 18) /* LC_DYSYMTAB */
+ ml := newMachoLoad(11, 18) /* LC_DYSYMTAB */
- n = 0
+ n := 0
ml.data[0] = uint32(n) /* ilocalsym */
ml.data[1] = uint32(nkind[SymKindLocal]) /* nlocalsym */
n += nkind[SymKindLocal]
ml.data[11] = 0 /* nextrefsyms */
// must match domacholink below
- s1 = Linklookup(Ctxt, ".machosymtab", 0)
+ s1 := Linklookup(Ctxt, ".machosymtab", 0)
- s2 = Linklookup(Ctxt, ".linkedit.plt", 0)
- s3 = Linklookup(Ctxt, ".linkedit.got", 0)
+ s2 := Linklookup(Ctxt, ".linkedit.plt", 0)
+ s3 := Linklookup(Ctxt, ".linkedit.got", 0)
ml.data[12] = uint32(linkoff + s1.Size) /* indirectsymoff */
ml.data[13] = uint32((s2.Size + s3.Size) / 4) /* nindirectsyms */
}
func Domacholink() int64 {
- var size int
- var s1 *LSym
- var s2 *LSym
- var s3 *LSym
- var s4 *LSym
-
machosymtab()
// write data that will be linkedit section
- s1 = Linklookup(Ctxt, ".machosymtab", 0)
+ s1 := Linklookup(Ctxt, ".machosymtab", 0)
- s2 = Linklookup(Ctxt, ".linkedit.plt", 0)
- s3 = Linklookup(Ctxt, ".linkedit.got", 0)
- s4 = Linklookup(Ctxt, ".machosymstr", 0)
+ s2 := Linklookup(Ctxt, ".linkedit.plt", 0)
+ s3 := Linklookup(Ctxt, ".linkedit.got", 0)
+ s4 := Linklookup(Ctxt, ".machosymstr", 0)
// Force the linkedit section to end on a 16-byte
// boundary. This allows pure (non-cgo) Go binaries
Adduint8(Ctxt, s4, 0)
}
- size = int(s1.Size + s2.Size + s3.Size + s4.Size)
+ size := int(s1.Size + s2.Size + s3.Size + s4.Size)
if size > 0 {
linkoff = Rnd(int64(uint64(HEADR)+Segtext.Length), int64(INITRND)) + Rnd(int64(Segdata.Filelen), int64(INITRND)) + Rnd(int64(Segdwarf.Filelen), int64(INITRND))
}
func machorelocsect(sect *Section, first *LSym) {
- var sym *LSym
- var eaddr int32
- var ri int
- var r *Reloc
-
// If main section has no bits, nothing to relocate.
if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
return
}
sect.Reloff = uint64(Cpos())
+ var sym *LSym
for sym = first; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
}
}
- eaddr = int32(sect.Vaddr + sect.Length)
+ eaddr := int32(sect.Vaddr + sect.Length)
+ var r *Reloc
+ var ri int
for ; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
}
func Machoemitreloc() {
- var sect *Section
-
for Cpos()&7 != 0 {
Cput(0)
}
machorelocsect(Segtext.Sect, Ctxt.Textp)
- for sect = Segtext.Sect.Next; sect != nil; sect = sect.Next {
+ for sect := Segtext.Sect.Next; sect != nil; sect = sect.Next {
machorelocsect(sect, datap)
}
- for sect = Segdata.Sect; sect != nil; sect = sect.Next {
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
machorelocsect(sect, datap)
}
}
var endmagic string = "\xff\xffgo13ld"
func ldobjfile(ctxt *Link, f *Biobuf, pkg string, length int64, pn string) {
- var c int
- var buf [8]uint8
- var start int64
- var lib string
-
- start = Boffset(f)
+ start := Boffset(f)
ctxt.Version++
- buf = [8]uint8{}
+ buf := [8]uint8{}
Bread(f, buf[:])
if string(buf[:]) != startmagic {
log.Fatalf("%s: invalid file start %x %x %x %x %x %x %x %x", pn, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7])
}
- c = Bgetc(f)
+ c := Bgetc(f)
if c != 1 {
log.Fatalf("%s: invalid file version number %d", pn, c)
}
+ var lib string
for {
lib = rdstring(f)
if lib == "" {
var readsym_ndup int
func readsym(ctxt *Link, f *Biobuf, pkg string, pn string) {
- var i int
- var j int
- var c int
- var t int
- var v int
- var n int
- var nreloc int
- var size int
- var dupok int
- var name string
- var data []byte
- var r *Reloc
- var s *LSym
- var dup *LSym
- var typ *LSym
- var pc *Pcln
- var a *Auto
-
if Bgetc(f) != 0xfe {
log.Fatalf("readsym out of sync")
}
- t = int(rdint(f))
- name = expandpkg(rdstring(f), pkg)
- v = int(rdint(f))
+ t := int(rdint(f))
+ name := expandpkg(rdstring(f), pkg)
+ v := int(rdint(f))
if v != 0 && v != 1 {
log.Fatalf("invalid symbol version %d", v)
}
- dupok = int(rdint(f))
+ dupok := int(rdint(f))
dupok &= 1
- size = int(rdint(f))
- typ = rdsym(ctxt, f, pkg)
+ size := int(rdint(f))
+ typ := rdsym(ctxt, f, pkg)
+ var data []byte
rddata(f, &data)
- nreloc = int(rdint(f))
+ nreloc := int(rdint(f))
if v != 0 {
v = ctxt.Version
}
- s = Linklookup(ctxt, name, v)
- dup = nil
+ s := Linklookup(ctxt, name, v)
+ dup := (*LSym)(nil)
if s.Type != 0 && s.Type != SXREF {
if (t == SDATA || t == SBSS || t == SNOPTRBSS) && len(data) == 0 && nreloc == 0 {
if s.Size < int64(size) {
if nreloc > 0 {
s.R = make([]Reloc, nreloc)
s.R = s.R[:nreloc]
- for i = 0; i < nreloc; i++ {
+ var r *Reloc
+ for i := 0; i < nreloc; i++ {
r = &s.R[i]
r.Off = int32(rdint(f))
r.Siz = uint8(rdint(f))
s.Args = int32(rdint(f))
s.Locals = int32(rdint(f))
s.Nosplit = uint8(rdint(f))
- v = int(rdint(f))
+ v := int(rdint(f))
s.Leaf = uint8(v & 1)
s.Cfunc = uint8(v & 2)
- n = int(rdint(f))
- for i = 0; i < n; i++ {
+ n := int(rdint(f))
+ var a *Auto
+ for i := 0; i < n; i++ {
a = new(Auto)
a.Asym = rdsym(ctxt, f, pkg)
a.Aoffset = int32(rdint(f))
}
s.Pcln = new(Pcln)
- pc = s.Pcln
+ pc := s.Pcln
rddata(f, &pc.Pcsp.P)
rddata(f, &pc.Pcfile.P)
rddata(f, &pc.Pcline.P)
n = int(rdint(f))
pc.Pcdata = make([]Pcdata, n)
pc.Npcdata = n
- for i = 0; i < n; i++ {
+ for i := 0; i < n; i++ {
rddata(f, &pc.Pcdata[i].P)
}
n = int(rdint(f))
pc.Funcdata = make([]*LSym, n)
pc.Funcdataoff = make([]int64, n)
pc.Nfuncdata = n
- for i = 0; i < n; i++ {
+ for i := 0; i < n; i++ {
pc.Funcdata[i] = rdsym(ctxt, f, pkg)
}
- for i = 0; i < n; i++ {
+ for i := 0; i < n; i++ {
pc.Funcdataoff[i] = rdint(f)
}
n = int(rdint(f))
pc.File = make([]*LSym, n)
pc.Nfile = n
- for i = 0; i < n; i++ {
+ for i := 0; i < n; i++ {
pc.File[i] = rdsym(ctxt, f, pkg)
}
fmt.Fprintf(ctxt.Bso, " args=%#x locals=%#x", uint64(s.Args), uint64(s.Locals))
}
fmt.Fprintf(ctxt.Bso, "\n")
- for i = 0; i < len(s.P); {
+ var c int
+ var j int
+ for i := 0; i < len(s.P); {
fmt.Fprintf(ctxt.Bso, "\t%#04x", uint(i))
for j = i; j < i+16 && j < len(s.P); j++ {
fmt.Fprintf(ctxt.Bso, " %02x", s.P[j])
i += 16
}
- for i = 0; i < len(s.R); i++ {
+ var r *Reloc
+ for i := 0; i < len(s.R); i++ {
r = &s.R[i]
fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%d\n", int(r.Off), r.Siz, r.Type, r.Sym.Name, int64(r.Add))
}
func rdint(f *Biobuf) int64 {
var c int
- var uv uint64
- var shift int
- uv = 0
- for shift = 0; ; shift += 7 {
+ uv := uint64(0)
+ for shift := 0; ; shift += 7 {
if shift >= 64 {
log.Fatalf("corrupt input")
}
var symbuf []byte
func rdsym(ctxt *Link, f *Biobuf, pkg string) *LSym {
- var n int
- var v int
- var p string
- var s *LSym
-
- n = int(rdint(f))
+ n := int(rdint(f))
if n == 0 {
rdint(f)
return nil
symbuf = make([]byte, n)
}
Bread(f, symbuf[:n])
- p = string(symbuf[:n])
- v = int(rdint(f))
+ p := string(symbuf[:n])
+ v := int(rdint(f))
if v != 0 {
v = ctxt.Version
}
- s = Linklookup(ctxt, expandpkg(p, pkg), v)
+ s := Linklookup(ctxt, expandpkg(p, pkg), v)
if v == 0 && s.Name[0] == '$' && s.Type == 0 {
if strings.HasPrefix(s.Name, "$f32.") {
- var i32 int32
x, _ := strconv.ParseUint(s.Name[5:], 16, 32)
- i32 = int32(x)
+ i32 := int32(x)
s.Type = SRODATA
Adduint32(ctxt, s, uint32(i32))
s.Reachable = false
} else if strings.HasPrefix(s.Name, "$f64.") || strings.HasPrefix(s.Name, "$i64.") {
- var i64 int64
x, _ := strconv.ParseUint(s.Name[5:], 16, 64)
- i64 = int64(x)
+ i64 := int64(x)
s.Type = SRODATA
Adduint64(ctxt, s, uint64(i64))
s.Reachable = false
// iteration over encoded pcdata tables.
func getvarint(pp *[]byte) uint32 {
- var p []byte
- var shift int
- var v uint32
-
- v = 0
- p = *pp
- for shift = 0; ; shift += 7 {
+ v := uint32(0)
+ p := *pp
+ for shift := 0; ; shift += 7 {
v |= uint32(p[0]&0x7F) << uint(shift)
tmp4 := p
p = p[1:]
}
func pciternext(it *Pciter) {
- var v uint32
- var dv int32
-
it.pc = it.nextpc
if it.done != 0 {
return
}
// value delta
- v = getvarint(&it.p)
+ v := getvarint(&it.p)
if v == 0 && it.start == 0 {
it.done = 1
}
it.start = 0
- dv = int32(v>>1) ^ (int32(v<<31) >> 31)
+ dv := int32(v>>1) ^ (int32(v<<31) >> 31)
it.value += dv
// pc delta
// license that can be found in the LICENSE file.
func addvarint(d *Pcdata, val uint32) {
- var n int32
- var v uint32
- var p []byte
-
- n = 0
- for v = val; v >= 0x80; v >>= 7 {
+ n := int32(0)
+ for v := val; v >= 0x80; v >>= 7 {
n++
}
n++
}
d.P = d.P[:old+int(n)]
- p = d.P[old:]
+ p := d.P[old:]
+ var v uint32
for v = val; v >= 0x80; v >>= 7 {
p[0] = byte(v | 0x80)
p = p[1:]
}
func addpctab(ftab *LSym, off int32, d *Pcdata) int32 {
- var start int32
-
- start = int32(len(ftab.P))
+ start := int32(len(ftab.P))
Symgrow(Ctxt, ftab, int64(start)+int64(len(d.P)))
copy(ftab.P[start:], d.P)
}
func ftabaddstring(ftab *LSym, s string) int32 {
- var n int32
- var start int32
-
- n = int32(len(s)) + 1
- start = int32(len(ftab.P))
+ n := int32(len(s)) + 1
+ start := int32(len(ftab.P))
Symgrow(Ctxt, ftab, int64(start)+int64(n)+1)
copy(ftab.P[start:], s)
return start
}
func renumberfiles(ctxt *Link, files []*LSym, d *Pcdata) {
- var i int
var f *LSym
- var out Pcdata
- var it Pciter
- var v uint32
- var oldval int32
- var newval int32
- var val int32
- var dv int32
// Give files numbers.
- for i = 0; i < len(files); i++ {
+ for i := 0; i < len(files); i++ {
f = files[i]
if f.Type != SFILEPATH {
ctxt.Nhistfile++
}
}
- newval = -1
- out = Pcdata{}
+ newval := int32(-1)
+ out := Pcdata{}
+ var dv int32
+ var it Pciter
+ var oldval int32
+ var v uint32
+ var val int32
for pciterinit(ctxt, &it, d); it.done == 0; pciternext(&it) {
// value delta
oldval = it.value
var pclntab_zpcln Pcln
func pclntab() {
- var i int32
- var nfunc int32
- var start int32
- var funcstart int32
- var ftab *LSym
- var s *LSym
- var last *LSym
- var off int32
- var end int32
- var frameptrsize int32
- var funcdata_bytes int64
- var pcln *Pcln
- var it Pciter
-
- funcdata_bytes = 0
- ftab = Linklookup(Ctxt, "runtime.pclntab", 0)
+ funcdata_bytes := int64(0)
+ ftab := Linklookup(Ctxt, "runtime.pclntab", 0)
ftab.Type = SPCLNTAB
ftab.Reachable = true
// function table, alternating PC and offset to func struct [each entry thearch.ptrsize bytes]
// end PC [thearch.ptrsize bytes]
// offset to file table [4 bytes]
- nfunc = 0
+ nfunc := int32(0)
for Ctxt.Cursym = Ctxt.Textp; Ctxt.Cursym != nil; Ctxt.Cursym = Ctxt.Cursym.Next {
if container(Ctxt.Cursym) == 0 {
setuintxx(Ctxt, ftab, 8, uint64(nfunc), int64(Thearch.Ptrsize))
nfunc = 0
- last = nil
+ last := (*LSym)(nil)
+ var end int32
+ var frameptrsize int32
+ var funcstart int32
+ var i int32
+ var it Pciter
+ var off int32
+ var pcln *Pcln
for Ctxt.Cursym = Ctxt.Textp; Ctxt.Cursym != nil; Ctxt.Cursym = Ctxt.Cursym.Next {
last = Ctxt.Cursym
if container(Ctxt.Cursym) != 0 {
setaddrplus(Ctxt, ftab, 8+int64(Thearch.Ptrsize)+int64(nfunc)*2*int64(Thearch.Ptrsize), last, last.Size)
// Start file table.
- start = int32(len(ftab.P))
+ start := int32(len(ftab.P))
start += int32(-len(ftab.P)) & (int32(Thearch.Ptrsize) - 1)
setuint32(Ctxt, ftab, 8+int64(Thearch.Ptrsize)+int64(nfunc)*2*int64(Thearch.Ptrsize)+int64(Thearch.Ptrsize), uint32(start))
Symgrow(Ctxt, ftab, int64(start)+(int64(Ctxt.Nhistfile)+1)*4)
setuint32(Ctxt, ftab, int64(start), uint32(Ctxt.Nhistfile))
- for s = Ctxt.Filesyms; s != nil; s = s.Next {
+ for s := Ctxt.Filesyms; s != nil; s = s.Next {
setuint32(Ctxt, ftab, int64(start)+s.Value*4, uint32(ftabaddstring(ftab, s.Name)))
}
// findfunctab generates a lookup table to quickly find the containing
// function for a pc. See src/runtime/symtab.go:findfunc for details.
func findfunctab() {
- var t *LSym
- var s *LSym
- var e *LSym
- var idx int32
- var i int32
- var j int32
- var nbuckets int32
- var n int32
- var base int32
- var min int64
- var max int64
- var p int64
- var q int64
- var indexes []int32
-
- t = Linklookup(Ctxt, "runtime.findfunctab", 0)
+ t := Linklookup(Ctxt, "runtime.findfunctab", 0)
t.Type = SRODATA
t.Reachable = true
// find min and max address
- min = Ctxt.Textp.Value
+ min := Ctxt.Textp.Value
- max = 0
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ max := int64(0)
+ for s := Ctxt.Textp; s != nil; s = s.Next {
max = s.Value + s.Size
}
// for each subbucket, compute the minimum of all symbol indexes
// that map to that subbucket.
- n = int32((max - min + SUBBUCKETSIZE - 1) / SUBBUCKETSIZE)
+ n := int32((max - min + SUBBUCKETSIZE - 1) / SUBBUCKETSIZE)
- indexes = make([]int32, n)
- for i = 0; i < n; i++ {
+ indexes := make([]int32, n)
+ for i := int32(0); i < n; i++ {
indexes[i] = NOIDX
}
- idx = 0
- for s = Ctxt.Textp; s != nil; s = s.Next {
+ idx := int32(0)
+ var e *LSym
+ var i int32
+ var p int64
+ var q int64
+ for s := Ctxt.Textp; s != nil; s = s.Next {
if container(s) != 0 {
continue
}
}
// allocate table
- nbuckets = int32((max - min + BUCKETSIZE - 1) / BUCKETSIZE)
+ nbuckets := int32((max - min + BUCKETSIZE - 1) / BUCKETSIZE)
Symgrow(Ctxt, t, 4*int64(nbuckets)+int64(n))
// fill in table
- for i = 0; i < nbuckets; i++ {
+ var base int32
+ var j int32
+ for i := int32(0); i < nbuckets; i++ {
base = indexes[i*SUBBUCKETS]
if base == NOIDX {
Diag("hole in findfunctab")
var ncoffsym int
func addpesection(name string, sectsize int, filesize int) *IMAGE_SECTION_HEADER {
- var h *IMAGE_SECTION_HEADER
-
if pensect == 16 {
Diag("too many sections")
Errorexit()
}
- h = &sh[pensect]
+ h := &sh[pensect]
pensect++
copy(h.Name[:], name)
h.VirtualSize = uint32(sectsize)
}
func initdynimport() *Dll {
- var m *Imp
var d *Dll
- var s *LSym
- var dynamic *LSym
dr = nil
- m = nil
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ m := (*Imp)(nil)
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if !s.Reachable || s.Type != SDYNIMPORT {
continue
}
d.ms = m
}
- dynamic = Linklookup(Ctxt, ".windynamic", 0)
+ dynamic := Linklookup(Ctxt, ".windynamic", 0)
dynamic.Reachable = true
dynamic.Type = SWINDOWS
- for d = dr; d != nil; d = d.next {
+ for d := dr; d != nil; d = d.next {
for m = d.ms; m != nil; m = m.next {
m.s.Type = SWINDOWS | SSUB
m.s.Sub = dynamic.Sub
}
func addimports(datsect *IMAGE_SECTION_HEADER) {
- var isect *IMAGE_SECTION_HEADER
- var n uint64
- var oftbase uint64
- var ftbase uint64
- var startoff int64
- var endoff int64
- var m *Imp
- var d *Dll
- var dynamic *LSym
-
- startoff = Cpos()
- dynamic = Linklookup(Ctxt, ".windynamic", 0)
+ startoff := Cpos()
+ dynamic := Linklookup(Ctxt, ".windynamic", 0)
// skip import descriptor table (will write it later)
- n = 0
+ n := uint64(0)
- for d = dr; d != nil; d = d.next {
+ for d := dr; d != nil; d = d.next {
n++
}
Cseek(startoff + int64(binary.Size(&IMAGE_IMPORT_DESCRIPTOR{}))*int64(n+1))
// write dll names
- for d = dr; d != nil; d = d.next {
+ for d := dr; d != nil; d = d.next {
d.nameoff = uint64(Cpos()) - uint64(startoff)
strput(d.name)
}
// write function names
- for d = dr; d != nil; d = d.next {
+ var m *Imp
+ for d := dr; d != nil; d = d.next {
for m = d.ms; m != nil; m = m.next {
m.off = uint64(nextsectoff) + uint64(Cpos()) - uint64(startoff)
Wputl(0) // hint
}
// write OriginalFirstThunks
- oftbase = uint64(Cpos()) - uint64(startoff)
+ oftbase := uint64(Cpos()) - uint64(startoff)
n = uint64(Cpos())
- for d = dr; d != nil; d = d.next {
+ for d := dr; d != nil; d = d.next {
d.thunkoff = uint64(Cpos()) - n
for m = d.ms; m != nil; m = m.next {
if pe64 != 0 {
// add pe section and pad it at the end
n = uint64(Cpos()) - uint64(startoff)
- isect = addpesection(".idata", int(n), int(n))
+ isect := addpesection(".idata", int(n), int(n))
isect.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE
chksectoff(isect, startoff)
strnput("", int(uint64(isect.SizeOfRawData)-n))
- endoff = Cpos()
+ endoff := Cpos()
// write FirstThunks (allocated in .data section)
- ftbase = uint64(dynamic.Value) - uint64(datsect.VirtualAddress) - PEBASE
+ ftbase := uint64(dynamic.Value) - uint64(datsect.VirtualAddress) - PEBASE
Cseek(int64(uint64(datsect.PointerToRawData) + ftbase))
- for d = dr; d != nil; d = d.next {
+ for d := dr; d != nil; d = d.next {
for m = d.ms; m != nil; m = m.next {
if pe64 != 0 {
Vputl(m.off)
// finally write import descriptor table
Cseek(startoff)
- for d = dr; d != nil; d = d.next {
+ for d := dr; d != nil; d = d.next {
Lputl(uint32(uint64(isect.VirtualAddress) + oftbase + d.thunkoff))
Lputl(0)
Lputl(0)
}
func (x pescmp) Less(i, j int) bool {
- var s1 *LSym
- var s2 *LSym
-
- s1 = x[i]
- s2 = x[j]
+ s1 := x[i]
+ s2 := x[j]
return stringsCompare(s1.Extname, s2.Extname) < 0
}
func initdynexport() {
- var s *LSym
-
nexport = 0
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if !s.Reachable || s.Cgoexport&CgoExportDynamic == 0 {
continue
}
}
func addexports() {
- var sect *IMAGE_SECTION_HEADER
var e IMAGE_EXPORT_DIRECTORY
- var size int
- var i int
- var va int
- var va_name int
- var va_addr int
- var va_na int
- var v int
-
- size = binary.Size(&e) + 10*nexport + len(outfile) + 1
- for i = 0; i < nexport; i++ {
+
+ size := binary.Size(&e) + 10*nexport + len(outfile) + 1
+ for i := 0; i < nexport; i++ {
size += len(dexport[i].Extname) + 1
}
return
}
- sect = addpesection(".edata", size, size)
+ sect := addpesection(".edata", size, size)
sect.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ
chksectoff(sect, Cpos())
- va = int(sect.VirtualAddress)
+ va := int(sect.VirtualAddress)
dd[IMAGE_DIRECTORY_ENTRY_EXPORT].VirtualAddress = uint32(va)
dd[IMAGE_DIRECTORY_ENTRY_EXPORT].Size = sect.VirtualSize
- va_name = va + binary.Size(&e) + nexport*4
- va_addr = va + binary.Size(&e)
- va_na = va + binary.Size(&e) + nexport*8
+ va_name := va + binary.Size(&e) + nexport*4
+ va_addr := va + binary.Size(&e)
+ va_na := va + binary.Size(&e) + nexport*8
e.Characteristics = 0
e.MajorVersion = 0
binary.Write(&coutbuf, binary.LittleEndian, &e)
// put EXPORT Address Table
- for i = 0; i < nexport; i++ {
+ for i := 0; i < nexport; i++ {
Lputl(uint32(dexport[i].Value - PEBASE))
}
// put EXPORT Name Pointer Table
- v = int(e.Name + uint32(len(outfile)) + 1)
+ v := int(e.Name + uint32(len(outfile)) + 1)
- for i = 0; i < nexport; i++ {
+ for i := 0; i < nexport; i++ {
Lputl(uint32(v))
v += len(dexport[i].Extname) + 1
}
// put EXPORT Ordinal Table
- for i = 0; i < nexport; i++ {
+ for i := 0; i < nexport; i++ {
Wputl(uint16(i))
}
// put Names
strnput(outfile, len(outfile)+1)
- for i = 0; i < nexport; i++ {
+ for i := 0; i < nexport; i++ {
strnput(dexport[i].Extname, len(dexport[i].Extname)+1)
}
strnput("", int(sect.SizeOfRawData-uint32(size)))
}
func dope() {
- var rel *LSym
-
/* relocation table */
- rel = Linklookup(Ctxt, ".rel", 0)
+ rel := Linklookup(Ctxt, ".rel", 0)
rel.Reachable = true
rel.Type = SELFROSECT
* <http://www.microsoft.com/whdc/system/platform/firmware/PECOFFdwn.mspx>
*/
func newPEDWARFSection(name string, size int64) *IMAGE_SECTION_HEADER {
- var h *IMAGE_SECTION_HEADER
- var s string
- var off int
-
if size == 0 {
return nil
}
- off = strtbladd(name)
- s = fmt.Sprintf("/%d", off)
- h = addpesection(s, int(size), int(size))
+ off := strtbladd(name)
+ s := fmt.Sprintf("/%d", off)
+ h := addpesection(s, int(size), int(size))
h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE
return h
}
func addpesym(s *LSym, name string, type_ int, addr int64, size int64, ver int, gotype *LSym) {
- var cs *COFFSym
-
if s == nil {
return
}
}
if coffsym != nil {
- cs = &coffsym[ncoffsym]
+ cs := &coffsym[ncoffsym]
cs.sym = s
if len(s.Name) > 8 {
cs.strtbloff = strtbladd(s.Name)
}
func addpesymtable() {
- var h *IMAGE_SECTION_HEADER
- var i int
- var size int
- var s *COFFSym
-
if Debug['s'] == 0 {
genasmsym(addpesym)
coffsym = make([]COFFSym, ncoffsym)
genasmsym(addpesym)
}
- size = len(strtbl) + 4 + 18*ncoffsym
- h = addpesection(".symtab", size, size)
+ size := len(strtbl) + 4 + 18*ncoffsym
+ h := addpesection(".symtab", size, size)
h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE
chksectoff(h, Cpos())
fh.PointerToSymbolTable = uint32(Cpos())
fh.NumberOfSymbols = uint32(ncoffsym)
// put COFF symbol table
- for i = 0; i < ncoffsym; i++ {
+ var s *COFFSym
+ for i := 0; i < ncoffsym; i++ {
s = &coffsym[i]
if s.strtbloff == 0 {
strnput(s.sym.Name, 8)
// put COFF string table
Lputl(uint32(len(strtbl)) + 4)
- for i = 0; i < len(strtbl); i++ {
+ for i := 0; i < len(strtbl); i++ {
Cput(uint8(strtbl[i]))
}
strnput("", int(h.SizeOfRawData-uint32(size)))
}
func addpersrc() {
- var h *IMAGE_SECTION_HEADER
- var p []byte
- var val uint32
- var r *Reloc
- var ri int
-
if rsrcsym == nil {
return
}
- h = addpesection(".rsrc", int(rsrcsym.Size), int(rsrcsym.Size))
+ h := addpesection(".rsrc", int(rsrcsym.Size), int(rsrcsym.Size))
h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_CNT_INITIALIZED_DATA
chksectoff(h, Cpos())
// relocation
- for ri = 0; ri < len(rsrcsym.R); ri++ {
+ var p []byte
+ var r *Reloc
+ var val uint32
+ for ri := 0; ri < len(rsrcsym.R); ri++ {
r = &rsrcsym.R[ri]
p = rsrcsym.P[r.Off:]
val = uint32(int64(h.VirtualAddress) + r.Add)
}
func Asmbpe() {
- var t *IMAGE_SECTION_HEADER
- var d *IMAGE_SECTION_HEADER
-
switch Thearch.Thechar {
default:
Diag("unknown PE architecture")
fh.Machine = IMAGE_FILE_MACHINE_I386
}
- t = addpesection(".text", int(Segtext.Length), int(Segtext.Length))
+ t := addpesection(".text", int(Segtext.Length), int(Segtext.Length))
t.Characteristics = IMAGE_SCN_CNT_CODE | IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ
chksectseg(t, &Segtext)
textsect = pensect
- d = addpesection(".data", int(Segdata.Length), int(Segdata.Filelen))
+ d := addpesection(".data", int(Segdata.Length), int(Segdata.Filelen))
d.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE
chksectseg(d, &Segdata)
datasect = pensect
}
func linknew(arch *LinkArch) *Link {
- var ctxt *Link
- var p string
- var buf string
-
- ctxt = new(Link)
+ ctxt := new(Link)
ctxt.Hash = make(map[symVer]*LSym)
ctxt.Arch = arch
ctxt.Version = HistVersion
ctxt.Goroot = obj.Getgoroot()
- p = obj.Getgoarch()
+ p := obj.Getgoarch()
if p != arch.Name {
log.Fatalf("invalid goarch %s (want %s)", p, arch.Name)
}
+ var buf string
buf, _ = os.Getwd()
if buf == "" {
buf = "/???"
// On arm, record goarm.
if ctxt.Arch.Thechar == '5' {
- p = obj.Getgoarm()
+ p := obj.Getgoarm()
if p != "" {
ctxt.Goarm = int32(obj.Atoi(p))
} else {
}
func linknewsym(ctxt *Link, symb string, v int) *LSym {
- var s *LSym
-
- s = new(LSym)
+ s := new(LSym)
*s = LSym{}
s.Dynid = -1
var headstr_buf string
func Headstr(v int) string {
- var i int
-
- for i = 0; i < len(headers); i++ {
+ for i := 0; i < len(headers); i++ {
if v == headers[i].val {
return headers[i].name
}
}
func headtype(name string) int {
- var i int
-
- for i = 0; i < len(headers); i++ {
+ for i := 0; i < len(headers); i++ {
if name == headers[i].name {
return headers[i].val
}
var maxelfstr int
func putelfstr(s string) int {
- var off int
- var n int
-
if len(Elfstrdat) == 0 && s != "" {
// first entry must be empty string
putelfstr("")
// Rewrite · to . for ASCII-only tools like DTrace (sigh)
s = strings.Replace(s, "·", ".", -1)
- n = len(s) + 1
+ n := len(s) + 1
for len(Elfstrdat)+n > cap(Elfstrdat) {
Elfstrdat = append(Elfstrdat[:cap(Elfstrdat)], 0)[:len(Elfstrdat)]
}
- off = len(Elfstrdat)
+ off := len(Elfstrdat)
Elfstrdat = Elfstrdat[:off+n]
copy(Elfstrdat[off:], s)
var elfbind int
func putelfsym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ *LSym) {
- var bind int
var type_ int
- var off int
- var other int
- var xo *LSym
switch t {
default:
type_ = STT_OBJECT
}
- xo = x
+ xo := x
for xo.Outer != nil {
xo = xo.Outer
}
// One pass for each binding: STB_LOCAL, STB_GLOBAL,
// maybe one day STB_WEAK.
- bind = STB_GLOBAL
+ bind := STB_GLOBAL
if ver != 0 || (x.Type&SHIDDEN != 0) {
bind = STB_LOCAL
return
}
- off = putelfstr(s)
+ off := putelfstr(s)
if Linkmode == LinkExternal {
addr -= int64((xo.Sect.(*Section)).Vaddr)
}
- other = 2
+ other := 2
if x.Type&SHIDDEN != 0 {
other = 0
}
}
func putelfsymshndx(sympos int64, shndx int) {
- var here int64
-
- here = Cpos()
+ here := Cpos()
switch Thearch.Thechar {
case '6':
Cseek(sympos + 6)
}
func Asmelfsym() {
- var s *LSym
- var name string
-
// the first symbol entry is reserved
putelfsyment(0, 0, 0, STB_LOCAL<<4|STT_NOTYPE, 0, 0)
genasmsym(putelfsym)
if Linkmode == LinkExternal && HEADTYPE != Hopenbsd {
- s = Linklookup(Ctxt, "runtime.tlsg", 0)
+ s := Linklookup(Ctxt, "runtime.tlsg", 0)
if s.Sect == nil {
Ctxt.Cursym = nil
Diag("missing section for %s", s.Name)
elfglobalsymndx = numelfsym
genasmsym(putelfsym)
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ var name string
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if s.Type != SHOSTOBJ && (s.Type != SDYNIMPORT || !s.Reachable) {
continue
}
}
func putplan9sym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ *LSym) {
- var i int
- var l int
-
switch t {
case 'T',
'L',
'z',
'Z',
'm':
- l = 4
+ l := 4
if HEADTYPE == Hplan9 && Thearch.Thechar == '6' && Debug['8'] == 0 {
Lputb(uint32(addr >> 32))
l = 8
Lputb(uint32(addr))
Cput(uint8(t + 0x80)) /* 0x80 is variable length */
+ var i int
if t == 'z' || t == 'Z' {
Cput(uint8(s[0]))
for i = 1; s[i] != 0 || s[i+1] != 0; i += 2 {
}
func symtab() {
- var s *LSym
- var symtype *LSym
- var symtypelink *LSym
- var symgostring *LSym
- var symgofunc *LSym
-
dosymtype()
// Define these so that they'll get put into the symbol table.
xdefine("runtime.esymtab", SRODATA, 0)
// garbage collection symbols
- s = Linklookup(Ctxt, "runtime.gcdata", 0)
+ s := Linklookup(Ctxt, "runtime.gcdata", 0)
s.Type = SRODATA
s.Size = 0
s.Type = STYPE
s.Size = 0
s.Reachable = true
- symtype = s
+ symtype := s
s = Linklookup(Ctxt, "go.string.*", 0)
s.Type = SGOSTRING
s.Size = 0
s.Reachable = true
- symgostring = s
+ symgostring := s
s = Linklookup(Ctxt, "go.func.*", 0)
s.Type = SGOFUNC
s.Size = 0
s.Reachable = true
- symgofunc = s
+ symgofunc := s
- symtypelink = Linklookup(Ctxt, "runtime.typelink", 0)
+ symtypelink := Linklookup(Ctxt, "runtime.typelink", 0)
symt = Linklookup(Ctxt, "runtime.symtab", 0)
symt.Type = SSYMTAB
// within a type they sort by size, so the .* symbols
// just defined above will be first.
// hide the specific symbols.
- for s = Ctxt.Allsym; s != nil; s = s.Allsym {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if !s.Reachable || s.Special != 0 || s.Type != SRODATA {
continue
}
func plan9quote(s string) string {
if s == "" {
- goto needquote
+ return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
for i := 0; i < len(s); i++ {
if s[i] <= ' ' || s[i] == '\'' {
- goto needquote
+ return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
}
return s
-
-needquote:
- return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
func tokenize(s string) []string {
// In rare cases, asmoutnacl might split p into two instructions.
// origPC is the PC for this Prog (no padding is taken into account).
func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint32) int {
- var size int
- var reg int
- var q *obj.Prog
- var a *obj.Addr
- var a2 *obj.Addr
-
- size = int(o.size)
+ size := int(o.size)
// instruction specific
switch p.As {
// split it into two instructions:
// ADD $-100004, R13
// MOVW R14, 0(R13)
- q = ctxt.NewProg()
+ q := ctxt.NewProg()
p.Scond &^= C_WBIT
*q = *p
- a = &p.To
+ a := &p.To
+ var a2 *obj.Addr
if p.To.Type == obj.TYPE_MEM {
a2 = &q.To
} else {
}
if (p.To.Type == obj.TYPE_MEM && p.To.Reg != REG_R13 && p.To.Reg != REG_R9) || (p.From.Type == obj.TYPE_MEM && p.From.Reg != REG_R13 && p.From.Reg != REG_R9) { // MOVW Rx, X(Ry), y != 13 && y != 9 // MOVW X(Rx), Ry, x != 13 && x != 9
+ var a *obj.Addr
if p.To.Type == obj.TYPE_MEM {
a = &p.To
} else {
a = &p.From
}
- reg = int(a.Reg)
+ reg := int(a.Reg)
if size == 4 {
// if addr.reg == 0, then it is probably load from x(FP) with small x, no need to modify.
if reg == 0 {
if p.Scond&(C_PBIT|C_WBIT) != 0 {
ctxt.Diag("unsupported instruction (.P/.W): %v", p)
}
- q = ctxt.NewProg()
+ q := ctxt.NewProg()
*q = *p
+ var a2 *obj.Addr
if p.To.Type == obj.TYPE_MEM {
a2 = &q.To
} else {
func span5(ctxt *obj.Link, cursym *obj.LSym) {
var p *obj.Prog
var op *obj.Prog
- var o *Optab
- var m int
- var bflag int
- var i int
- var v int
- var times int
- var c int32
- var opc int32
- var out [6 + 3]uint32
- var bp []byte
p = cursym.Text
if p == nil || p.Link == nil { // handle external functions and ELF section symbols
ctxt.Cursym = cursym
ctxt.Autosize = int32(p.To.Offset + 4)
- c = 0
+ c := int32(0)
op = p
p = p.Link
+ var i int
+ var m int
+ var o *Optab
for ; p != nil || ctxt.Blitrl != nil; (func() { op = p; p = p.Link })() {
if p == nil {
if checkpool(ctxt, op, 0) {
* generate extra passes putting branches
* around jmps to fix. this is rare.
*/
- times = 0
+ times := 0
+ var bflag int
+ var opc int32
+ var out [6 + 3]uint32
for {
if ctxt.Debugvlog != 0 {
fmt.Fprintf(ctxt.Bso, "%5.2f span1\n", obj.Cputime())
ctxt.Autosize = int32(p.To.Offset + 4)
obj.Symgrow(ctxt, cursym, cursym.Size)
- bp = cursym.P
+ bp := cursym.P
c = int32(p.Pc) // even p->link might need extra padding
+ var v int
for p = p.Link; p != nil; p = p.Link {
ctxt.Pc = p.Pc
ctxt.Curp = p
}
func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) bool {
- var q *obj.Prog
-
if ctxt.Blitrl != nil {
if skip != 0 {
if false && skip == 1 {
fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), pool.size, pool.start)
}
- q = ctxt.NewProg()
+ q := ctxt.NewProg()
q.As = AB
q.To.Type = obj.TYPE_BRANCH
q.Pcond = p.Link
}
if ctxt.Headtype == obj.Hnacl && pool.size%16 != 0 {
// if pool is not multiple of 16 bytes, add an alignment marker
- q = ctxt.NewProg()
+ q := ctxt.NewProg()
q.As = ADATABUNDLEEND
ctxt.Elitrl.Link = q
}
func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
- var q *obj.Prog
var t obj.Prog
- var c int
- c = aclass(ctxt, a)
+ c := aclass(ctxt, a)
t.Ctxt = ctxt
t.As = AWORD
}
if t.Pcrel == nil {
- for q = ctxt.Blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
+ for q := ctxt.Blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
if q.Pcrel == nil && q.To == t.To {
p.Pcond = q
return
if ctxt.Headtype == obj.Hnacl && pool.size%16 == 0 {
// start a new data bundle
- q = ctxt.NewProg()
+ q := ctxt.NewProg()
q.As = ADATABUNDLE
q.Pc = int64(pool.size)
pool.size += 4
ctxt.Elitrl = q
}
- q = ctxt.NewProg()
+ q := ctxt.NewProg()
*q = t
q.Pc = int64(pool.size)
}
func immrot(v uint32) int32 {
- var i int
-
- for i = 0; i < 16; i++ {
+ for i := 0; i < 16; i++ {
if v&^0xff == 0 {
return int32(uint32(int32(i)<<8) | v | 1<<25)
}
}
func aclass(ctxt *obj.Link, a *obj.Addr) int {
- var s *obj.LSym
- var t int
-
switch a.Type {
case obj.TYPE_NONE:
return C_NONE
case obj.NAME_AUTO:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
- t = int(immaddr(int32(ctxt.Instoffset)))
+ t := int(immaddr(int32(ctxt.Instoffset)))
if t != 0 {
if immhalf(int32(ctxt.Instoffset)) {
if immfloat(int32(t)) {
case obj.NAME_PARAM:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 4
- t = int(immaddr(int32(ctxt.Instoffset)))
+ t := int(immaddr(int32(ctxt.Instoffset)))
if t != 0 {
if immhalf(int32(ctxt.Instoffset)) {
if immfloat(int32(t)) {
case obj.TYPE_NONE:
ctxt.Instoffset = a.Offset
- t = int(immaddr(int32(ctxt.Instoffset)))
+ t := int(immaddr(int32(ctxt.Instoffset)))
if t != 0 {
if immhalf(int32(ctxt.Instoffset)) { /* n.b. that it will also satisfy immrot */
if immfloat(int32(t)) {
if immfloat(int32(t)) {
return C_FOREG /* n.b. that it will also satisfy immrot */
}
- t = int(immrot(uint32(ctxt.Instoffset)))
+ t := int(immrot(uint32(ctxt.Instoffset)))
if t != 0 {
return C_SROREG
}
return aconsize(ctxt)
}
- t = int(immrot(uint32(ctxt.Instoffset)))
+ t := int(immrot(uint32(ctxt.Instoffset)))
if t != 0 {
return C_RCON
}
case obj.NAME_EXTERN,
obj.NAME_STATIC:
- s = a.Sym
+ s := a.Sym
if s == nil {
break
}
}
func aconsize(ctxt *obj.Link) int {
- var t int
-
- t = int(immrot(uint32(ctxt.Instoffset)))
+ t := int(immrot(uint32(ctxt.Instoffset)))
if t != 0 {
return C_RACON
}
}
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
- var a1 int
- var a2 int
- var a3 int
- var r int
- var c1 []byte
- var c3 []byte
- var o []Optab
- var e []Optab
-
- a1 = int(p.Optab)
+ a1 := int(p.Optab)
if a1 != 0 {
return &optab[a1-1:][0]
}
}
a1--
- a3 = int(p.To.Class)
+ a3 := int(p.To.Class)
if a3 == 0 {
a3 = aclass(ctxt, &p.To) + 1
p.To.Class = int8(a3)
}
a3--
- a2 = C_NONE
+ a2 := C_NONE
if p.Reg != 0 {
a2 = C_REG
}
- r = int(p.As)
- o = oprange[r].start
+ r := int(p.As)
+ o := oprange[r].start
if o == nil {
o = oprange[r].stop /* just generate an error */
}
fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type)
}
- e = oprange[r].stop
- c1 = xcmp[a1][:]
- c3 = xcmp[a3][:]
+ e := oprange[r].stop
+ c1 := xcmp[a1][:]
+ c3 := xcmp[a3][:]
for ; -cap(o) < -cap(e); o = o[1:] {
if int(o[0].a2) == a2 {
if c1[o[0].a1] != 0 {
}
func (x ocmp) Less(i, j int) bool {
- var p1 *Optab
- var p2 *Optab
- var n int
-
- p1 = &x[i]
- p2 = &x[j]
- n = int(p1.as) - int(p2.as)
+ p1 := &x[i]
+ p2 := &x[j]
+ n := int(p1.as) - int(p2.as)
if n != 0 {
return n < 0
}
}
func buildop(ctxt *obj.Link) {
- var i int
var n int
- var r int
- for i = 0; i < C_GOK; i++ {
+ for i := 0; i < C_GOK; i++ {
for n = 0; n < C_GOK; n++ {
if cmp(n, i) {
xcmp[i][n] = 1
}
sort.Sort(ocmp(optab[:n]))
- for i = 0; i < n; i++ {
+ var r int
+ for i := 0; i < n; i++ {
r = int(optab[i].as)
oprange[r].start = optab[i:]
for int(optab[i].as) == r {
}
func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
- var o1 uint32
- var o2 uint32
- var o3 uint32
- var o4 uint32
- var o5 uint32
- var o6 uint32
- var v int32
- var r int
- var rf int
- var rt int
- var rt2 int
- var rel *obj.Reloc
-
ctxt.Printp = p
- o1 = 0
- o2 = 0
- o3 = 0
- o4 = 0
- o5 = 0
- o6 = 0
+ o1 := uint32(0)
+ o2 := uint32(0)
+ o3 := uint32(0)
+ o4 := uint32(0)
+ o5 := uint32(0)
+ o6 := uint32(0)
ctxt.Armsize += int32(o.size)
if false { /*debug['P']*/
fmt.Printf("%x: %v\ttype %d\n", uint32(p.Pc), p, o.type_)
case 1: /* op R,[R],R */
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
- rf = int(p.From.Reg)
- rt = int(p.To.Reg)
- r = int(p.Reg)
+ rf := int(p.From.Reg)
+ rt := int(p.To.Reg)
+ r := int(p.Reg)
if p.To.Type == obj.TYPE_NONE {
rt = 0
}
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
- rt = int(p.To.Reg)
- r = int(p.Reg)
+ rt := int(p.To.Reg)
+ r := int(p.Reg)
if p.To.Type == obj.TYPE_NONE {
rt = 0
}
o1 = oprrr(ctxt, AADD, int(p.Scond))
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
case 5: /* bra s */
o1 = opbra(ctxt, int(p.As), int(p.Scond))
- v = -8
+ v := int32(-8)
if p.To.Sym != nil {
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
rel.Sym = p.To.Sym
}
o1 = oprrr(ctxt, ABL, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 0
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 0
rel.Type = obj.R_CALLIND
aclass(ctxt, &p.From)
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
case 9: /* sll R,[R],R -> mov (R<<R),R */
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
if p.To.Sym != nil {
// This case happens with words generated
// in the PC stream as part of the literal pool.
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
}
o2 = oprrr(ctxt, int(p.As), int(p.Scond))
o2 |= REGTMP & 15
- r = int(p.Reg)
+ r := int(p.Reg)
if p.As == AMOVW || p.As == AMVN {
r = 0
} else if r == 0 {
o2 = oprrr(ctxt, ASRA, int(p.Scond))
}
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
o1 |= (uint32(p.From.Reg)&15)<<0 | (uint32(r)&15)<<12
o2 |= uint32(r)&15 | (uint32(r)&15)<<12
if p.As == AMOVB || p.As == AMOVBS || p.As == AMOVBU {
case 15: /* mul r,[r,]r */
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
- rf = int(p.From.Reg)
- rt = int(p.To.Reg)
- r = int(p.Reg)
+ rf := int(p.From.Reg)
+ rt := int(p.To.Reg)
+ r := int(p.Reg)
if r == 0 {
r = rt
}
case 17:
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
- rf = int(p.From.Reg)
- rt = int(p.To.Reg)
- rt2 = int(p.To.Offset)
- r = int(p.Reg)
+ rf := int(p.From.Reg)
+ rt := int(p.To.Reg)
+ rt2 := int(p.To.Offset)
+ r := int(p.Reg)
o1 |= (uint32(rf)&15)<<8 | (uint32(r)&15)<<0 | (uint32(rt)&15)<<16 | (uint32(rt2)&15)<<12
case 20: /* mov/movb/movbu R,O(R) */
aclass(ctxt, &p.To)
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
case 21: /* mov/movbu O(R),R -> lr */
aclass(ctxt, &p.From)
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
if o1 == 0 {
break
}
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
if o1 == 0 {
break
}
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
o2 = oprrr(ctxt, AADD, int(p.Scond))
o2 |= REGTMP & 15
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
o1 = 0xe8fd8000
case 50: /* floating point store */
- v = regoff(ctxt, &p.To)
+ v := regoff(ctxt, &p.To)
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
o1 = ofsr(ctxt, int(p.As), int(p.From.Reg), v, r, int(p.Scond), p)
case 51: /* floating point load */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
if o1 == 0 {
break
}
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
if o1 == 0 {
break
}
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
case 54: /* floating point arith */
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
- rf = int(p.From.Reg)
- rt = int(p.To.Reg)
- r = int(p.Reg)
+ rf := int(p.From.Reg)
+ rt := int(p.To.Reg)
+ r := int(p.Reg)
if r == 0 {
r = rt
if p.As == AMOVF || p.As == AMOVD || p.As == AMOVFD || p.As == AMOVDF || p.As == ASQRTF || p.As == ASQRTD || p.As == AABSF || p.As == AABSD {
o1 = oprrr(ctxt, AAND, int(p.Scond))
o1 |= uint32(immrot(0xff))
- rt = int(p.To.Reg)
- r = int(p.From.Reg)
+ rt := int(p.To.Reg)
+ r := int(p.From.Reg)
if p.To.Type == obj.TYPE_NONE {
rt = 0
}
case 63: /* bcase */
if p.Pcond != nil {
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
if p.To.Sym != nil && p.To.Sym.Type != 0 {
case 70: /* movh/movhu R,O(R) -> strh */
aclass(ctxt, &p.To)
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
case 71: /* movb/movh/movhu O(R),R -> ldrsb/ldrsh/ldrh */
aclass(ctxt, &p.From)
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
if o1 == 0 {
break
}
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
if o1 == 0 {
break
}
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
o2 = oprrr(ctxt, ASUBF, int(p.Scond))
}
- v = 0x70 // 1.0
- r = (int(p.To.Reg) & 15) << 0
+ v := int32(0x70) // 1.0
+ r := (int(p.To.Reg) & 15) << 0
// movf $1.0, r
o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
}
o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
o1 |= (uint32(p.To.Reg) & 15) << 12
- v = int32(chipfloat5(ctxt, p.From.U.Dval))
+ v := int32(chipfloat5(ctxt, p.From.U.Dval))
o1 |= (uint32(v) & 0xf) << 0
o1 |= (uint32(v) & 0xf0) << 12
}
func mov(ctxt *obj.Link, p *obj.Prog) uint32 {
- var o1 uint32
- var rt int
- var r int
-
aclass(ctxt, &p.From)
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 := oprrr(ctxt, int(p.As), int(p.Scond))
o1 |= uint32(p.From.Offset)
- rt = int(p.To.Reg)
+ rt := int(p.To.Reg)
if p.To.Type == obj.TYPE_NONE {
rt = 0
}
- r = int(p.Reg)
+ r := int(p.Reg)
if p.As == AMOVW || p.As == AMVN {
r = 0
} else if r == 0 {
}
func oprrr(ctxt *obj.Link, a int, sc int) uint32 {
- var o uint32
-
- o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
+ o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_SBIT != 0 {
o |= 1 << 20
}
}
func olr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
- var o uint32
-
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on LDR/STR instruction")
}
- o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
+ o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_PBIT == 0 {
o |= 1 << 24
}
}
func olhr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
- var o uint32
-
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on LDRH/STRH instruction")
}
- o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
+ o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_PBIT == 0 {
o |= 1 << 24
}
}
func osr(ctxt *obj.Link, a int, r int, v int32, b int, sc int) uint32 {
- var o uint32
-
- o = olr(ctxt, v, b, r, sc) ^ (1 << 20)
+ o := olr(ctxt, v, b, r, sc) ^ (1 << 20)
if a != AMOVW {
o |= 1 << 22
}
}
func oshr(ctxt *obj.Link, r int, v int32, b int, sc int) uint32 {
- var o uint32
-
- o = olhr(ctxt, v, b, r, sc) ^ (1 << 20)
+ o := olhr(ctxt, v, b, r, sc) ^ (1 << 20)
return o
}
}
func ofsr(ctxt *obj.Link, a int, r int, v int32, b int, sc int, p *obj.Prog) uint32 {
- var o uint32
-
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on FLDR/FSTR instruction")
}
- o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
+ o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_PBIT == 0 {
o |= 1 << 24
}
}
func omvl(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, dr int) uint32 {
- var v int32
var o1 uint32
if p.Pcond == nil {
aclass(ctxt, a)
- v = immrot(^uint32(ctxt.Instoffset))
+ v := immrot(^uint32(ctxt.Instoffset))
if v == 0 {
ctxt.Diag("missing literal")
prasm(p)
o1 |= uint32(v)
o1 |= (uint32(dr) & 15) << 12
} else {
- v = int32(p.Pcond.Pc - p.Pc - 8)
+ v := int32(p.Pcond.Pc - p.Pc - 8)
o1 = olr(ctxt, v, REGPC, dr, int(p.Scond)&C_SCOND)
}
}
func chipfloat5(ctxt *obj.Link, e float64) int {
- var n int
- var h1 uint32
- var l uint32
- var h uint32
- var ei uint64
-
// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
if ctxt.Goarm < 7 {
- goto no
+ return -1
}
- ei = math.Float64bits(e)
- l = uint32(ei)
- h = uint32(ei >> 32)
+ ei := math.Float64bits(e)
+ l := uint32(ei)
+ h := uint32(ei >> 32)
if l != 0 || h&0xffff != 0 {
- goto no
+ return -1
}
- h1 = h & 0x7fc00000
+ h1 := h & 0x7fc00000
if h1 != 0x40000000 && h1 != 0x3fc00000 {
- goto no
+ return -1
}
- n = 0
+ n := 0
// sign bit (a)
if h&0x80000000 != 0 {
//print("match %.8lux %.8lux %d\n", l, h, n);
return n
-
-no:
- return -1
}
var bigP *obj.Prog
func Pconv(p *obj.Prog) string {
- var str string
- var sc string
- var fp string
-
- var a int
- var s int
-
- a = int(p.As)
- s = int(p.Scond)
- sc = extra[(s&C_SCOND)^C_SCOND_XOR]
+ a := int(p.As)
+ s := int(p.Scond)
+ sc := extra[(s&C_SCOND)^C_SCOND_XOR]
if s&C_SBIT != 0 {
sc += ".S"
}
if s&C_UBIT != 0 { /* ambiguous with FBIT */
sc += ".U"
}
+ var str string
if a == obj.ADATA {
str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v",
p.Pc, p.Line(), Aconv(a), obj.Dconv(p, &p.From), p.From3.Offset, obj.Dconv(p, &p.To))
p.Pc, p.Line(), Aconv(a), sc, obj.Dconv(p, &p.From), Rconv(int(p.Reg)), obj.Dconv(p, &p.To))
}
+ var fp string
fp += str
return fp
}
func Aconv(a int) string {
- var s string
- var fp string
-
- s = "???"
+ s := "???"
if a >= obj.AXXX && a < ALAST {
s = Anames[a]
}
+ var fp string
fp += s
return fp
}
func RAconv(a *obj.Addr) string {
- var str string
- var fp string
-
- var i int
- var v int
-
- str = fmt.Sprintf("GOK-reglist")
+ str := fmt.Sprintf("GOK-reglist")
switch a.Type {
case obj.TYPE_CONST:
if a.Reg != 0 {
if a.Sym != nil {
break
}
- v = int(a.Offset)
+ v := int(a.Offset)
str = ""
- for i = 0; i < NREG; i++ {
+ for i := 0; i < NREG; i++ {
if v&(1<<uint(i)) != 0 {
if str == "" {
str += "[R"
str += "]"
}
+ var fp string
fp += str
return fp
}
}
func DRconv(a int) string {
- var s string
- var fp string
-
- s = "C_??"
+ s := "C_??"
if a >= C_NONE && a <= C_NCLASS {
s = cnames5[a]
}
+ var fp string
fp += s
return fp
}
var progedit_tlsfallback *obj.LSym
func progedit(ctxt *obj.Link, p *obj.Prog) {
- var literal string
- var s *obj.LSym
-
p.From.Class = 0
p.To.Class = 0
switch p.As {
case AMOVF:
if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.U.Dval) < 0 && (chipzero5(ctxt, p.From.U.Dval) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
- var i32 uint32
- var f32 float32
- f32 = float32(p.From.U.Dval)
- i32 = math.Float32bits(f32)
- literal = fmt.Sprintf("$f32.%08x", i32)
- s = obj.Linklookup(ctxt, literal, 0)
+ f32 := float32(p.From.U.Dval)
+ i32 := math.Float32bits(f32)
+ literal := fmt.Sprintf("$f32.%08x", i32)
+ s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint32(ctxt, s, i32)
case AMOVD:
if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.U.Dval) < 0 && (chipzero5(ctxt, p.From.U.Dval) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
- var i64 uint64
- i64 = math.Float64bits(p.From.U.Dval)
- literal = fmt.Sprintf("$f64.%016x", i64)
- s = obj.Linklookup(ctxt, literal, 0)
+ i64 := math.Float64bits(p.From.U.Dval)
+ literal := fmt.Sprintf("$f64.%016x", i64)
+ s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint64(ctxt, s, i64)
)
func linkcase(casep *obj.Prog) {
- var p *obj.Prog
-
- for p = casep; p != nil; p = p.Link {
+ for p := casep; p != nil; p = p.Link {
if p.As == ABCASE {
for ; p != nil && p.As == ABCASE; p = p.Link {
p.Pcrel = casep
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
- var p *obj.Prog
- var pl *obj.Prog
- var p1 *obj.Prog
- var p2 *obj.Prog
- var q *obj.Prog
- var q1 *obj.Prog
- var q2 *obj.Prog
- var o int
- var autosize int32
- var autoffset int32
-
- autosize = 0
+ autosize := int32(0)
if ctxt.Symmorestack[0] == nil {
ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
}
- q = nil
+ q := (*obj.Prog)(nil)
ctxt.Cursym = cursym
softfloat(ctxt, cursym)
- p = cursym.Text
- autoffset = int32(p.To.Offset)
+ p := cursym.Text
+ autoffset := int32(p.To.Offset)
if autoffset < 0 {
autoffset = 0
}
// MOVW.nil R3, 0(R1) +4
// CMP R1, R2
// BNE L
- pl = obj.Appendp(ctxt, p)
- p = pl
+ pl := obj.Appendp(ctxt, p)
+ p := pl
p.As = AMOVW
p.From.Type = obj.TYPE_REG
* expand RET
* expand BECOME pseudo
*/
- for p = cursym.Text; p != nil; p = p.Link {
+ var q1 *obj.Prog
+ for p := cursym.Text; p != nil; p = p.Link {
switch p.As {
case ACASE:
if ctxt.Flag_shared != 0 {
q = p
}
- for p = cursym.Text; p != nil; p = p.Link {
+ var o int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var q2 *obj.Prog
+ for p := cursym.Text; p != nil; p = p.Link {
o = int(p.As)
switch o {
case obj.ATEXT:
}
func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
- var p *obj.Prog
- var next *obj.Prog
- var symsfloat *obj.LSym
- var wasfloat int
-
if ctxt.Goarm > 5 {
return
}
- symsfloat = obj.Linklookup(ctxt, "_sfloat", 0)
+ symsfloat := obj.Linklookup(ctxt, "_sfloat", 0)
- wasfloat = 0
- for p = cursym.Text; p != nil; p = p.Link {
+ wasfloat := 0
+ for p := cursym.Text; p != nil; p = p.Link {
if p.Pcond != nil {
p.Pcond.Mark |= LABEL
}
}
- for p = cursym.Text; p != nil; p = p.Link {
+ var next *obj.Prog
+ for p := cursym.Text; p != nil; p = p.Link {
switch p.As {
case AMOVW:
if isfloatreg(&p.To) || isfloatreg(&p.From) {
}
func follow(ctxt *obj.Link, s *obj.LSym) {
- var firstp *obj.Prog
- var lastp *obj.Prog
-
ctxt.Cursym = s
- firstp = ctxt.NewProg()
- lastp = firstp
+ firstp := ctxt.NewProg()
+ lastp := firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link
}
func span8(ctxt *obj.Link, s *obj.LSym) {
- var p *obj.Prog
- var q *obj.Prog
- var c int32
- var v int32
- var loop int32
- var bp []byte
- var n int
- var m int
- var i int
-
ctxt.Cursym = s
if s.Text == nil || s.Text.Link == nil {
instinit()
}
- for p = s.Text; p != nil; p = p.Link {
+ var v int32
+ for p := s.Text; p != nil; p = p.Link {
if p.To.Type == obj.TYPE_BRANCH {
if p.Pcond == nil {
p.Pcond = p
}
}
- for p = s.Text; p != nil; p = p.Link {
+ var q *obj.Prog
+ for p := s.Text; p != nil; p = p.Link {
p.Back = 2 // use short branches first time through
q = p.Pcond
if q != nil && (q.Back&2 != 0) {
}
}
- n = 0
+ n := 0
+ var bp []byte
+ var c int32
+ var i int
+ var loop int32
+ var m int
+ var p *obj.Prog
for {
loop = 0
for i = 0; i < len(s.R); i++ {
if false { /* debug['a'] > 1 */
fmt.Printf("span1 %s %d (%d tries)\n %.6x", s.Name, s.Size, n, 0)
+ var i int
for i = 0; i < len(s.P); i++ {
fmt.Printf(" %.2x", s.P[i])
if i%16 == 15 {
fmt.Printf("\n")
}
- for i = 0; i < len(s.R); i++ {
- var r *obj.Reloc
-
- r = &s.R[i]
+ for i := 0; i < len(s.R); i++ {
+ r := &s.R[i]
fmt.Printf(" rel %#.4x/%d %s%+d\n", uint32(r.Off), r.Siz, r.Sym.Name, r.Add)
}
}
}
func instinit() {
- var i int
var c int
- for i = 1; optab[i].as != 0; i++ {
+ for i := 1; optab[i].as != 0; i++ {
c = int(optab[i].as)
if opindex[c] != nil {
log.Fatalf("phase error in optab: %d (%v)", i, Aconv(c))
opindex[c] = &optab[i]
}
- for i = 0; i < Ymax; i++ {
+ for i := 0; i < Ymax; i++ {
ycover[i*Ymax+i] = 1
}
ycover[Ym*Ymax+Yxm] = 1
ycover[Yxr*Ymax+Yxm] = 1
- for i = 0; i < MAXREG; i++ {
+ for i := 0; i < MAXREG; i++ {
reg[i] = -1
if i >= REG_AL && i <= REG_BH {
reg[i] = (i - REG_AL) & 7
}
func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int {
- var v int32
-
// TODO(rsc): This special case is for SHRQ $3, AX:DX,
// which encodes as SHRQ $32(DX*0), AX.
// Similarly SHRQ CX, AX:DX is really SHRQ CX(DX*0), AX.
ctxt.Diag("TYPE_CONST with symbol: %v", obj.Dconv(p, a))
}
- v = int32(a.Offset)
+ v := int32(a.Offset)
if v == 0 {
return Yi0
}
}
func relput4(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
- var v int64
var rel obj.Reloc
- var r *obj.Reloc
- v = int64(vaddr(ctxt, p, a, &rel))
+ v := int64(vaddr(ctxt, p, a, &rel))
if rel.Siz != 0 {
if rel.Siz != 4 {
ctxt.Diag("bad reloc")
}
- r = obj.Addrel(ctxt.Cursym)
+ r := obj.Addrel(ctxt.Cursym)
*r = rel
r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
}
}
func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int32 {
- var s *obj.LSym
-
if r != nil {
*r = obj.Reloc{}
}
switch a.Name {
case obj.NAME_STATIC,
obj.NAME_EXTERN:
- s = a.Sym
+ s := a.Sym
if s != nil {
if r == nil {
ctxt.Diag("need reloc for %v", obj.Dconv(p, a))
}
func asmand(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int) {
- var v int32
var base int
var rel obj.Reloc
- v = int32(a.Offset)
+ v := int32(a.Offset)
rel.Siz = 0
switch a.Type {
}
if a.Index != REG_NONE && a.Index != REG_TLS {
- base = int(a.Reg)
+ base := int(a.Reg)
switch a.Name {
case obj.NAME_EXTERN,
obj.NAME_STATIC:
putrelv:
if rel.Siz != 0 {
- var r *obj.Reloc
-
if rel.Siz != 4 {
ctxt.Diag("bad rel")
goto bad
}
- r = obj.Addrel(ctxt.Cursym)
+ r := obj.Addrel(ctxt.Cursym)
*r = rel
r.Off = int32(ctxt.Curp.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
}
// If a is empty, it returns BX to account for MULB-like instructions
// that might use DX and AX.
func byteswapreg(ctxt *obj.Link, a *obj.Addr) int {
- var cana int
- var canb int
- var canc int
- var cand int
-
- cand = 1
- canc = cand
- canb = canc
- cana = canb
+ cand := 1
+ canc := cand
+ canb := canc
+ cana := canb
if a.Type == obj.TYPE_NONE {
cand = 0
}
func doasm(ctxt *obj.Link, p *obj.Prog) {
- var o *Optab
- var q *obj.Prog
- var pp obj.Prog
- var t []byte
- var z int
- var op int
- var ft int
- var tt int
- var breg int
- var v int32
- var pre int32
- var rel obj.Reloc
- var r *obj.Reloc
- var a *obj.Addr
- var yt ytab
-
ctxt.Curp = p // TODO
- pre = int32(prefixof(ctxt, &p.From))
+ pre := int32(prefixof(ctxt, &p.From))
if pre != 0 {
ctxt.Andptr[0] = byte(pre)
p.Tt = uint8(oclass(ctxt, p, &p.To))
}
- ft = int(p.Ft) * Ymax
- tt = int(p.Tt) * Ymax
- o = opindex[p.As]
+ ft := int(p.Ft) * Ymax
+ tt := int(p.Tt) * Ymax
+ o := opindex[p.As]
- z = 0
+ z := 0
+ var a *obj.Addr
+ var op int
+ var q *obj.Prog
+ var r *obj.Reloc
+ var rel obj.Reloc
+ var v int32
+ var yt ytab
for _, yt = range o.ytab {
if ycover[ft+int(yt.from)] != 0 && ycover[tt+int(yt.to)] != 0 {
- goto found
- }
- z += int(yt.zoffset)
- }
- goto domov
-
-found:
- switch o.prefix {
- case Pq: /* 16 bit escape and opcode escape */
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Pf2, /* xmm opcode escape */
- Pf3:
- ctxt.Andptr[0] = byte(o.prefix)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
+ switch o.prefix {
+ case Pq: /* 16 bit escape and opcode escape */
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Pm: /* opcode escape */
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Pe: /* 16 bit escape */
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Pf2, /* xmm opcode escape */
+ Pf3:
+ ctxt.Andptr[0] = byte(o.prefix)
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Pb: /* botch */
- break
- }
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
- op = int(o.op[z])
- switch yt.zcase {
- default:
- ctxt.Diag("asmins: unknown z %d %v", yt.zcase, p)
- return
+ case Pm: /* opcode escape */
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zpseudo:
- break
+ case Pe: /* 16 bit escape */
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zlit:
- for ; ; z++ {
- op = int(o.op[z])
- if op == 0 {
+ case Pb: /* botch */
break
}
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- case Zlitm_r:
- for ; ; z++ {
op = int(o.op[z])
- if op == 0 {
- break
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
-
- case Zm_r:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
-
- case Zm2_r:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
-
- case Zm_r_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
-
- case Zm_r_i_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
- ctxt.Andptr[0] = byte(p.To.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
+ switch yt.zcase {
+ default:
+ ctxt.Diag("asmins: unknown z %d %v", yt.zcase, p)
+ return
- case Zibm_r:
- for {
- tmp2 := z
- z++
- op = int(o.op[tmp2])
- if op == 0 {
+ case Zpseudo:
break
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
- ctxt.Andptr[0] = byte(p.To.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zaut_r:
- ctxt.Andptr[0] = 0x8d
- ctxt.Andptr = ctxt.Andptr[1:] /* leal */
- if p.From.Type != obj.TYPE_ADDR {
- ctxt.Diag("asmins: Zaut sb type ADDR")
- }
- p.From.Type = obj.TYPE_MEM
- p.Ft = 0
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
- p.From.Type = obj.TYPE_ADDR
- p.Ft = 0
-
- case Zm_o:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, int(o.op[z+1]))
-
- case Zr_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, reg[p.From.Reg])
- case Zr_m_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.To, reg[p.From.Reg])
+ case Zlit:
+ for ; ; z++ {
+ op = int(o.op[z])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
- case Zr_m_i_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.To, reg[p.From.Reg])
- ctxt.Andptr[0] = byte(p.From.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zlitm_r:
+ for ; ; z++ {
+ op = int(o.op[z])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
- case Zcallindreg:
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc)
- r.Type = obj.R_CALLIND
- r.Siz = 0
- fallthrough
+ case Zm_r:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
- // fallthrough
- case Zo_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zm2_r:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
- asmand(ctxt, p, &p.To, int(o.op[z+1]))
+ case Zm_r_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
- case Zm_ibo:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, int(o.op[z+1]))
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.To, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zm_r_i_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+ ctxt.Andptr[0] = byte(p.To.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zibo_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, int(o.op[z+1]))
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zibm_r:
+ for {
+ tmp2 := z
+ z++
+ op = int(o.op[tmp2])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+ ctxt.Andptr[0] = byte(p.To.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Z_ib,
- Zib_:
- if yt.zcase == Zib_ {
- a = &p.From
- } else {
- a = &p.To
- }
- v = vaddr(ctxt, p, a, nil)
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zaut_r:
+ ctxt.Andptr[0] = 0x8d
+ ctxt.Andptr = ctxt.Andptr[1:] /* leal */
+ if p.From.Type != obj.TYPE_ADDR {
+ ctxt.Diag("asmins: Zaut sb type ADDR")
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.Ft = 0
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+ p.From.Type = obj.TYPE_ADDR
+ p.Ft = 0
- case Zib_rp:
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zm_o:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, int(o.op[z+1]))
- case Zil_rp:
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
- if o.prefix == Pe {
- v = vaddr(ctxt, p, &p.From, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, &p.From)
- }
+ case Zr_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.From.Reg])
- case Zib_rr:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, reg[p.To.Reg])
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zr_m_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.To, reg[p.From.Reg])
- case Z_il,
- Zil_:
- if yt.zcase == Zil_ {
- a = &p.From
- } else {
- a = &p.To
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- if o.prefix == Pe {
- v = vaddr(ctxt, p, a, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, a)
- }
+ case Zr_m_i_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.To, reg[p.From.Reg])
+ ctxt.Andptr[0] = byte(p.From.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zm_ilo,
- Zilo_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- if yt.zcase == Zilo_m {
- a = &p.From
- asmand(ctxt, p, &p.To, int(o.op[z+1]))
- } else {
- a = &p.To
- asmand(ctxt, p, &p.From, int(o.op[z+1]))
- }
+ case Zcallindreg:
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc)
+ r.Type = obj.R_CALLIND
+ r.Siz = 0
+ fallthrough
- if o.prefix == Pe {
- v = vaddr(ctxt, p, a, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, a)
- }
+ // fallthrough
+ case Zo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zil_rr:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, reg[p.To.Reg])
- if o.prefix == Pe {
- v = vaddr(ctxt, p, &p.From, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, &p.From)
- }
+ asmand(ctxt, p, &p.To, int(o.op[z+1]))
- case Z_rp:
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zm_ibo:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, int(o.op[z+1]))
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.To, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zrp_:
- ctxt.Andptr[0] = byte(op + reg[p.From.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zibo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, int(o.op[z+1]))
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zclr:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, reg[p.To.Reg])
+ case Z_ib,
+ Zib_:
+ if yt.zcase == Zib_ {
+ a = &p.From
+ } else {
+ a = &p.To
+ }
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zcall:
- if p.To.Sym == nil {
- ctxt.Diag("call without target")
- log.Fatalf("bad code")
- }
+ case Zib_rp:
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- r.Type = obj.R_CALL
- r.Siz = 4
- r.Sym = p.To.Sym
- r.Add = p.To.Offset
- put4(ctxt, 0)
-
- case Zbr,
- Zjmp,
- Zloop:
- if p.To.Sym != nil {
- if yt.zcase != Zjmp {
- ctxt.Diag("branch to ATEXT")
- log.Fatalf("bad code")
- }
+ case Zil_rp:
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, &p.From, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, &p.From)
+ }
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- r.Sym = p.To.Sym
- r.Type = obj.R_PCREL
- r.Siz = 4
- put4(ctxt, 0)
- break
- }
+ case Zib_rr:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.To.Reg])
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
- // Assumes q is in this function.
- // Fill in backward jump now.
- q = p.Pcond
+ case Z_il,
+ Zil_:
+ if yt.zcase == Zil_ {
+ a = &p.From
+ } else {
+ a = &p.To
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, a)
+ }
- if q == nil {
- ctxt.Diag("jmp/branch/loop without target")
- log.Fatalf("bad code")
- }
+ case Zm_ilo,
+ Zilo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if yt.zcase == Zilo_m {
+ a = &p.From
+ asmand(ctxt, p, &p.To, int(o.op[z+1]))
+ } else {
+ a = &p.To
+ asmand(ctxt, p, &p.From, int(o.op[z+1]))
+ }
- if p.Back&1 != 0 {
- v = int32(q.Pc - (p.Pc + 2))
- if v >= -128 {
- if p.As == AJCXZW {
- ctxt.Andptr[0] = 0x67
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, a)
}
+
+ case Zil_rr:
ctxt.Andptr[0] = byte(op)
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else if yt.zcase == Zloop {
- ctxt.Diag("loop too far: %v", p)
- } else {
- v -= 5 - 2
- if yt.zcase == Zbr {
- ctxt.Andptr[0] = 0x0f
+ asmand(ctxt, p, &p.To, reg[p.To.Reg])
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, &p.From, nil)
+ ctxt.Andptr[0] = byte(v)
ctxt.Andptr = ctxt.Andptr[1:]
- v--
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, &p.From)
}
- ctxt.Andptr[0] = byte(o.op[z+1])
+ case Z_rp:
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
+
+ case Zrp_:
+ ctxt.Andptr[0] = byte(op + reg[p.From.Reg])
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 16)
+
+ case Zclr:
+ ctxt.Andptr[0] = byte(op)
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 24)
+ asmand(ctxt, p, &p.To, reg[p.To.Reg])
+
+ case Zcall:
+ if p.To.Sym == nil {
+ ctxt.Diag("call without target")
+ log.Fatalf("bad code")
+ }
+
+ ctxt.Andptr[0] = byte(op)
ctxt.Andptr = ctxt.Andptr[1:]
- }
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Type = obj.R_CALL
+ r.Siz = 4
+ r.Sym = p.To.Sym
+ r.Add = p.To.Offset
+ put4(ctxt, 0)
+
+ case Zbr,
+ Zjmp,
+ Zloop:
+ if p.To.Sym != nil {
+ if yt.zcase != Zjmp {
+ ctxt.Diag("branch to ATEXT")
+ log.Fatalf("bad code")
+ }
- break
- }
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Sym = p.To.Sym
+ r.Type = obj.R_PCREL
+ r.Siz = 4
+ put4(ctxt, 0)
+ break
+ }
- // Annotate target; will fill in later.
- p.Forwd = q.Comefrom
+ // Assumes q is in this function.
+ // Fill in backward jump now.
+ q = p.Pcond
- q.Comefrom = p
- if p.Back&2 != 0 { // short
- if p.As == AJCXZW {
- ctxt.Andptr[0] = 0x67
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- } else if yt.zcase == Zloop {
- ctxt.Diag("loop too far: %v", p)
- } else {
- if yt.zcase == Zbr {
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- }
+ if q == nil {
+ ctxt.Diag("jmp/branch/loop without target")
+ log.Fatalf("bad code")
+ }
- case Zcallcon,
- Zjmpcon:
- if yt.zcase == Zcallcon {
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- r.Type = obj.R_PCREL
- r.Siz = 4
- r.Add = p.To.Offset
- put4(ctxt, 0)
+ if p.Back&1 != 0 {
+ v = int32(q.Pc - (p.Pc + 2))
+ if v >= -128 {
+ if p.As == AJCXZW {
+ ctxt.Andptr[0] = 0x67
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else if yt.zcase == Zloop {
+ ctxt.Diag("loop too far: %v", p)
+ } else {
+ v -= 5 - 2
+ if yt.zcase == Zbr {
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ v--
+ }
+
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 16)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 24)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
- case Zcallind:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- r.Type = obj.R_ADDR
- r.Siz = 4
- r.Add = p.To.Offset
- r.Sym = p.To.Sym
- put4(ctxt, 0)
-
- case Zbyte:
- v = vaddr(ctxt, p, &p.From, &rel)
- if rel.Siz != 0 {
- rel.Siz = uint8(op)
- r = obj.Addrel(ctxt.Cursym)
- *r = rel
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- }
+ break
+ }
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- if op > 1 {
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- if op > 2 {
- ctxt.Andptr[0] = byte(v >> 16)
+ // Annotate target; will fill in later.
+ p.Forwd = q.Comefrom
+
+ q.Comefrom = p
+ if p.Back&2 != 0 { // short
+ if p.As == AJCXZW {
+ ctxt.Andptr[0] = 0x67
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else if yt.zcase == Zloop {
+ ctxt.Diag("loop too far: %v", p)
+ } else {
+ if yt.zcase == Zbr {
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ case Zcallcon,
+ Zjmpcon:
+ if yt.zcase == Zcallcon {
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Type = obj.R_PCREL
+ r.Siz = 4
+ r.Add = p.To.Offset
+ put4(ctxt, 0)
+
+ case Zcallind:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(o.op[z+1])
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 24)
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Type = obj.R_ADDR
+ r.Siz = 4
+ r.Add = p.To.Offset
+ r.Sym = p.To.Sym
+ put4(ctxt, 0)
+
+ case Zbyte:
+ v = vaddr(ctxt, p, &p.From, &rel)
+ if rel.Siz != 0 {
+ rel.Siz = uint8(op)
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
+
+ ctxt.Andptr[0] = byte(v)
ctxt.Andptr = ctxt.Andptr[1:]
+ if op > 1 {
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if op > 2 {
+ ctxt.Andptr[0] = byte(v >> 16)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 24)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ }
+
+ case Zmov:
+ goto domov
}
- }
- case Zmov:
- goto domov
+ return
+ }
+ z += int(yt.zoffset)
}
-
- return
+ goto domov
domov:
- for t = []byte(ymovtab); t[0] != 0; t = t[8:] {
+ var pp obj.Prog
+ for t := []byte(ymovtab); t[0] != 0; t = t[8:] {
if p.As == int16(t[0]) {
if ycover[ft+int(t[1])] != 0 {
if ycover[tt+int(t[2])] != 0 {
- goto mfound
+ switch t[3] {
+ default:
+ ctxt.Diag("asmins: unknown mov %d %v", t[3], p)
+
+ case 0: /* lit */
+ for z = 4; t[z] != E; z++ {
+ ctxt.Andptr[0] = t[z]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ case 1: /* r,m */
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmand(ctxt, p, &p.To, int(t[5]))
+
+ case 2: /* m,r */
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmand(ctxt, p, &p.From, int(t[5]))
+
+ case 3: /* r,m - 2op */
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = t[5]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, int(t[6]))
+
+ case 4: /* m,r - 2op */
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = t[5]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, int(t[6]))
+
+ case 5: /* load full pointer, trash heap */
+ if t[4] != 0 {
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ switch p.To.Index {
+ default:
+ goto bad
+
+ case REG_DS:
+ ctxt.Andptr[0] = 0xc5
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_SS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb2
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_ES:
+ ctxt.Andptr[0] = 0xc4
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_FS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb4
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_GS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb5
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ case 6: /* double shift */
+ switch p.From.Type {
+ default:
+ goto bad
+
+ case obj.TYPE_CONST:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.From.Index])
+ ctxt.Andptr[0] = byte(p.From.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case obj.TYPE_REG:
+ switch p.From.Reg {
+ default:
+ goto bad
+
+ case REG_CL,
+ REG_CX:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = t[5]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.From.Index])
+ }
+ }
+
+ case 7: /* imul rm,r */
+ if t[4] == Pq {
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = t[5]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ // NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
+ // where you load the TLS base register into a register and then index off that
+ // register to access the actual TLS variables. Systems that allow direct TLS access
+ // are handled in prefixof above and should not be listed here.
+ case 8: /* mov tls, r */
+ switch ctxt.Headtype {
+ default:
+ log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype))
+
+ // ELF TLS base is 0(GS).
+ case obj.Hlinux,
+ obj.Hnacl:
+ pp.From = p.From
+
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Reg = REG_GS
+ pp.From.Offset = 0
+ pp.From.Index = REG_NONE
+ pp.From.Scale = 0
+ ctxt.Andptr[0] = 0x65
+ ctxt.Andptr = ctxt.Andptr[1:] // GS
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, reg[p.To.Reg])
+
+ case obj.Hplan9:
+ if ctxt.Plan9privates == nil {
+ ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
+ }
+ pp.From = obj.Addr{}
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Name = obj.NAME_EXTERN
+ pp.From.Sym = ctxt.Plan9privates
+ pp.From.Offset = 0
+ pp.From.Index = REG_NONE
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, reg[p.To.Reg])
+
+ // Windows TLS base is always 0x14(FS).
+ case obj.Hwindows:
+ pp.From = p.From
+
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Reg = REG_FS
+ pp.From.Offset = 0x14
+ pp.From.Index = REG_NONE
+ pp.From.Scale = 0
+ ctxt.Andptr[0] = 0x64
+ ctxt.Andptr = ctxt.Andptr[1:] // FS
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, reg[p.To.Reg])
+ }
+ }
+ return
}
}
}
z = int(p.From.Reg)
if p.From.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
- breg = byteswapreg(ctxt, &p.To)
+ breg := byteswapreg(ctxt, &p.To)
if breg != REG_AX {
ctxt.Andptr[0] = 0x87
ctxt.Andptr = ctxt.Andptr[1:] /* xchg lhs,bx */
z = int(p.To.Reg)
if p.To.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
- breg = byteswapreg(ctxt, &p.From)
+ breg := byteswapreg(ctxt, &p.From)
if breg != REG_AX {
ctxt.Andptr[0] = 0x87
ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
ctxt.Diag("doasm: notfound t2=%d from=%d to=%d %v", yt.zcase, p.Ft, p.Tt, p)
return
-
-mfound:
- switch t[3] {
- default:
- ctxt.Diag("asmins: unknown mov %d %v", t[3], p)
-
- case 0: /* lit */
- for z = 4; t[z] != E; z++ {
- ctxt.Andptr[0] = t[z]
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- case 1: /* r,m */
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmand(ctxt, p, &p.To, int(t[5]))
-
- case 2: /* m,r */
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmand(ctxt, p, &p.From, int(t[5]))
-
- case 3: /* r,m - 2op */
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = t[5]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, int(t[6]))
-
- case 4: /* m,r - 2op */
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = t[5]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, int(t[6]))
-
- case 5: /* load full pointer, trash heap */
- if t[4] != 0 {
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- switch p.To.Index {
- default:
- goto bad
-
- case REG_DS:
- ctxt.Andptr[0] = 0xc5
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_SS:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0xb2
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_ES:
- ctxt.Andptr[0] = 0xc4
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_FS:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0xb4
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_GS:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0xb5
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
-
- case 6: /* double shift */
- switch p.From.Type {
- default:
- goto bad
-
- case obj.TYPE_CONST:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, reg[p.From.Index])
- ctxt.Andptr[0] = byte(p.From.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case obj.TYPE_REG:
- switch p.From.Reg {
- default:
- goto bad
-
- case REG_CL,
- REG_CX:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = t[5]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, reg[p.From.Index])
- }
- }
-
- case 7: /* imul rm,r */
- if t[4] == Pq {
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- ctxt.Andptr[0] = t[4]
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- ctxt.Andptr[0] = t[5]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, reg[p.To.Reg])
-
- // NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
- // where you load the TLS base register into a register and then index off that
- // register to access the actual TLS variables. Systems that allow direct TLS access
- // are handled in prefixof above and should not be listed here.
- case 8: /* mov tls, r */
- switch ctxt.Headtype {
- default:
- log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype))
-
- // ELF TLS base is 0(GS).
- case obj.Hlinux,
- obj.Hnacl:
- pp.From = p.From
-
- pp.From.Type = obj.TYPE_MEM
- pp.From.Reg = REG_GS
- pp.From.Offset = 0
- pp.From.Index = REG_NONE
- pp.From.Scale = 0
- ctxt.Andptr[0] = 0x65
- ctxt.Andptr = ctxt.Andptr[1:] // GS
- ctxt.Andptr[0] = 0x8B
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &pp.From, reg[p.To.Reg])
-
- case obj.Hplan9:
- if ctxt.Plan9privates == nil {
- ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
- }
- pp.From = obj.Addr{}
- pp.From.Type = obj.TYPE_MEM
- pp.From.Name = obj.NAME_EXTERN
- pp.From.Sym = ctxt.Plan9privates
- pp.From.Offset = 0
- pp.From.Index = REG_NONE
- ctxt.Andptr[0] = 0x8B
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &pp.From, reg[p.To.Reg])
-
- // Windows TLS base is always 0x14(FS).
- case obj.Hwindows:
- pp.From = p.From
-
- pp.From.Type = obj.TYPE_MEM
- pp.From.Reg = REG_FS
- pp.From.Offset = 0x14
- pp.From.Index = REG_NONE
- pp.From.Scale = 0
- ctxt.Andptr[0] = 0x64
- ctxt.Andptr = ctxt.Andptr[1:] // FS
- ctxt.Andptr[0] = 0x8B
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &pp.From, reg[p.To.Reg])
- }
- }
}
var naclret = []uint8{
}
func asmins(ctxt *obj.Link, p *obj.Prog) {
- var r *obj.Reloc
-
ctxt.Andptr = ctxt.And[:]
if p.As == obj.AUSEFIELD {
- r = obj.Addrel(ctxt.Cursym)
+ r := obj.Addrel(ctxt.Cursym)
r.Off = 0
r.Sym = p.From.Sym
r.Type = obj.R_USEFIELD
func Pconv(p *obj.Prog) string {
var str string
- var fp string
switch p.As {
case obj.ADATA:
}
}
+ var fp string
fp += str
return fp
}
}
func progedit(ctxt *obj.Link, p *obj.Prog) {
- var literal string
- var s *obj.LSym
- var q *obj.Prog
-
// See obj6.c for discussion of TLS.
if canuselocaltls(ctxt) {
// Reduce TLS initial exec model to TLS local exec model.
// MOVL off(BX)(TLS*1), BX
// This allows the C compilers to emit references to m and g using the direct off(TLS) form.
if p.As == AMOVL && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_DI {
- q = obj.Appendp(ctxt, p)
+ q := obj.Appendp(ctxt, p)
q.As = p.As
q.From.Type = obj.TYPE_MEM
q.From.Reg = p.To.Reg
ACOMISS,
AUCOMISS:
if p.From.Type == obj.TYPE_FCONST {
- var i32 uint32
- var f32 float32
- f32 = float32(p.From.U.Dval)
- i32 = math.Float32bits(f32)
- literal = fmt.Sprintf("$f32.%08x", i32)
- s = obj.Linklookup(ctxt, literal, 0)
+ f32 := float32(p.From.U.Dval)
+ i32 := math.Float32bits(f32)
+ literal := fmt.Sprintf("$f32.%08x", i32)
+ s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint32(ctxt, s, i32)
ACOMISD,
AUCOMISD:
if p.From.Type == obj.TYPE_FCONST {
- var i64 uint64
- i64 = math.Float64bits(p.From.U.Dval)
- literal = fmt.Sprintf("$f64.%016x", i64)
- s = obj.Linklookup(ctxt, literal, 0)
+ i64 := math.Float64bits(p.From.U.Dval)
+ literal := fmt.Sprintf("$f64.%016x", i64)
+ s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint64(ctxt, s, i64)
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
- var p *obj.Prog
- var q *obj.Prog
- var p1 *obj.Prog
- var p2 *obj.Prog
- var autoffset int32
- var deltasp int32
- var a int
-
if ctxt.Symmorestack[0] == nil {
ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
return
}
- p = cursym.Text
- autoffset = int32(p.To.Offset)
+ p := cursym.Text
+ autoffset := int32(p.To.Offset)
if autoffset < 0 {
autoffset = 0
}
cursym.Locals = autoffset
cursym.Args = p.To.U.Argsize
- q = nil
+ q := (*obj.Prog)(nil)
if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
p = obj.Appendp(ctxt, p)
if q != nil {
q.Pcond = p
}
- deltasp = autoffset
+ deltasp := autoffset
if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
p = obj.Appendp(ctxt, p)
p.As = AJEQ
p.To.Type = obj.TYPE_BRANCH
- p1 = p
+ p1 := p
p = obj.Appendp(ctxt, p)
p.As = ALEAL
p = obj.Appendp(ctxt, p)
p.As = AJNE
p.To.Type = obj.TYPE_BRANCH
- p2 = p
+ p2 := p
p = obj.Appendp(ctxt, p)
p.As = AMOVL
p.As = ASTOSL
}
+ var a int
for ; p != nil; p = p.Link {
a = int(p.From.Name)
if a == obj.NAME_AUTO {
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
- var next *obj.Prog
-
p.As = AMOVL
p.From.Type = obj.TYPE_MEM
p.From.Reg = REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_CX
- next = p.Link
+ next := p.Link
progedit(ctxt, p)
for p.Link != next {
p = p.Link
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
- var q *obj.Prog
- var q1 *obj.Prog
-
if ctxt.Debugstack != 0 {
// 8l -K means check not only for stack
// overflow but stack underflow.
p.As = AJCC
p.To.Type = obj.TYPE_BRANCH
p.To.Offset = 4
- q1 = p
+ q1 := p
p = obj.Appendp(ctxt, p)
p.As = AINT
q1.Pcond = p
}
- q1 = nil
+ q1 := (*obj.Prog)(nil)
if framesize <= obj.StackSmall {
// small stack: SP <= stackguard
p.As = AJHI
p.To.Type = obj.TYPE_BRANCH
p.To.Offset = 4
- q = p
+ q := p
p = obj.Appendp(ctxt, p)
p.As = obj.ACALL
}
func follow(ctxt *obj.Link, s *obj.LSym) {
- var firstp *obj.Prog
- var lastp *obj.Prog
-
ctxt.Cursym = s
- firstp = ctxt.NewProg()
- lastp = firstp
+ firstp := ctxt.NewProg()
+ lastp := firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link
var xcmp [C_NCLASS][C_NCLASS]uint8
func span9(ctxt *obj.Link, cursym *obj.LSym) {
- var p *obj.Prog
- var q *obj.Prog
- var o *Optab
- var m int
- var bflag int
- var c int64
- var otxt int64
- var out [6]uint32
- var i int32
- var bp []byte
-
- p = cursym.Text
+ p := cursym.Text
if p == nil || p.Link == nil { // handle external functions and ELF section symbols
return
}
buildop(ctxt)
}
- c = 0
+ c := int64(0)
p.Pc = c
+ var m int
+ var o *Optab
for p = p.Link; p != nil; p = p.Link {
ctxt.Curp = p
p.Pc = c
* generate extra passes putting branches
* around jmps to fix. this is rare.
*/
- bflag = 1
+ bflag := 1
+ var otxt int64
+ var q *obj.Prog
for bflag != 0 {
if ctxt.Debugvlog != 0 {
fmt.Fprintf(ctxt.Bso, "%5.2f span1\n", obj.Cputime())
obj.Symgrow(ctxt, cursym, cursym.Size)
- bp = cursym.P
- for p = cursym.Text.Link; p != nil; p = p.Link {
+ bp := cursym.P
+ var i int32
+ var out [6]uint32
+ for p := cursym.Text.Link; p != nil; p = p.Link {
ctxt.Pc = p.Pc
ctxt.Curp = p
o = oplook(ctxt, p)
}
func aclass(ctxt *obj.Link, a *obj.Addr) int {
- var s *obj.LSym
-
switch a.Type {
case obj.TYPE_NONE:
return C_NONE
case obj.NAME_EXTERN,
obj.NAME_STATIC:
- s = a.Sym
+ s := a.Sym
if s == nil {
break
}
}
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
- var a1 int
- var a2 int
- var a3 int
- var a4 int
- var r int
- var c1 []byte
- var c3 []byte
- var c4 []byte
- var o []Optab
- var e []Optab
-
- a1 = int(p.Optab)
+ a1 := int(p.Optab)
if a1 != 0 {
return &optab[a1-1:][0]
}
}
a1--
- a3 = int(p.From3.Class)
+ a3 := int(p.From3.Class)
if a3 == 0 {
a3 = aclass(ctxt, &p.From3) + 1
p.From3.Class = int8(a3)
}
a3--
- a4 = int(p.To.Class)
+ a4 := int(p.To.Class)
if a4 == 0 {
a4 = aclass(ctxt, &p.To) + 1
p.To.Class = int8(a4)
}
a4--
- a2 = C_NONE
+ a2 := C_NONE
if p.Reg != 0 {
a2 = C_REG
}
//print("oplook %P %d %d %d %d\n", p, a1, a2, a3, a4);
- r = int(p.As)
+ r := int(p.As)
- o = oprange[r].start
+ o := oprange[r].start
if o == nil {
o = oprange[r].stop /* just generate an error */
}
- e = oprange[r].stop
- c1 = xcmp[a1][:]
- c3 = xcmp[a3][:]
- c4 = xcmp[a4][:]
+ e := oprange[r].stop
+ c1 := xcmp[a1][:]
+ c3 := xcmp[a3][:]
+ c4 := xcmp[a4][:]
for ; -cap(o) < -cap(e); o = o[1:] {
if int(o[0].a2) == a2 {
if c1[o[0].a1] != 0 {
}
func (x ocmp) Less(i, j int) bool {
- var p1 *Optab
- var p2 *Optab
- var n int
-
- p1 = &x[i]
- p2 = &x[j]
- n = int(p1.as) - int(p2.as)
+ p1 := &x[i]
+ p2 := &x[j]
+ n := int(p1.as) - int(p2.as)
if n != 0 {
return n < 0
}
}
func buildop(ctxt *obj.Link) {
- var i int
var n int
- var r int
- for i = 0; i < C_NCLASS; i++ {
+ for i := 0; i < C_NCLASS; i++ {
for n = 0; n < C_NCLASS; n++ {
if cmp(n, i) {
xcmp[i][n] = 1
for n = 0; optab[n].as != obj.AXXX; n++ {
}
sort.Sort(ocmp(optab[:n]))
- for i = 0; i < n; i++ {
+ var r int
+ for i := 0; i < n; i++ {
r = int(optab[i].as)
oprange[r].start = optab[i:]
for int(optab[i].as) == r {
// add R_ADDRPOWER relocation to symbol s for the two instructions o1 and o2.
func addaddrreloc(ctxt *obj.Link, s *obj.LSym, o1 *uint32, o2 *uint32) {
- var rel *obj.Reloc
-
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 8
rel.Sym = s
* 32-bit masks
*/
func getmask(m []byte, v uint32) bool {
- var i int
-
m[1] = 0
m[0] = m[1]
if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
if getmask(m, ^v) {
- i = int(m[0])
+ i := int(m[0])
m[0] = m[1] + 1
m[1] = byte(i - 1)
return true
return false
}
- for i = 0; i < 32; i++ {
+ for i := 0; i < 32; i++ {
if v&(1<<uint(31-i)) != 0 {
m[0] = byte(i)
for {
* 64-bit masks (rldic etc)
*/
func getmask64(m []byte, v uint64) bool {
- var i int
-
m[1] = 0
m[0] = m[1]
- for i = 0; i < 64; i++ {
+ for i := 0; i < 64; i++ {
if v&(uint64(1)<<uint(63-i)) != 0 {
m[0] = byte(i)
for {
}
func loadu32(r int, d int64) uint32 {
- var v int32
-
- v = int32(d >> 16)
+ v := int32(d >> 16)
if isuint32(uint64(d)) {
return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
}
}
func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
- var o1 uint32
- var o2 uint32
- var o3 uint32
- var o4 uint32
- var o5 uint32
- var v int32
- var t int32
- var d int64
- var r int
- var a int
- var mask [2]uint8
- var rel *obj.Reloc
-
- o1 = 0
- o2 = 0
- o3 = 0
- o4 = 0
- o5 = 0
+ o1 := uint32(0)
+ o2 := uint32(0)
+ o3 := uint32(0)
+ o4 := uint32(0)
+ o5 := uint32(0)
//print("%P => case %d\n", p, o->type);
switch o.type_ {
case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
//nerrors--;
ctxt.Diag("literal operation on R0\n%v", p)
o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
case 2: /* int/cr/fp op Rb,[Ra],Rd */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
case 3: /* mov $soreg/addcon/ucon, r ==> addis/addi $i,reg',r */
- d = vregoff(ctxt, &p.From)
+ d := vregoff(ctxt, &p.From)
- v = int32(d)
- r = int(p.From.Reg)
+ v := int32(d)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
ctxt.Diag("literal operation on R0\n%v", p)
}
- a = OP_ADDI
+ a := OP_ADDI
if o.a1 == C_UCON {
if d&0xffff != 0 {
log.Fatalf("invalid handling of %v", p)
o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
case 4: /* add/mul $scon,[r1],r2 */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
o1 = uint32(oprrr(ctxt, int(p.As)))
case 6: /* logical op Rb,[Rs,]Ra; no literal */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
o1 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
case 7: /* mov r, soreg ==> stw o(r) */
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
- v = regoff(ctxt, &p.To)
+ v := regoff(ctxt, &p.To)
if p.To.Type == obj.TYPE_MEM && p.Reg != 0 {
if v != 0 {
ctxt.Diag("illegal indexed instruction\n%v", p)
}
case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
if p.From.Type == obj.TYPE_MEM && p.Reg != 0 {
if v != 0 {
ctxt.Diag("illegal indexed instruction\n%v", p)
}
case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
if p.From.Type == obj.TYPE_MEM && p.Reg != 0 {
if v != 0 {
ctxt.Diag("illegal indexed instruction\n%v", p)
o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
case 11: /* br/bl lbra */
- v = 0
+ v := int32(0)
if p.Pcond != nil {
v = int32(p.Pcond.Pc - p.Pc)
o1 = OP_BR(uint32(opirr(ctxt, int(p.As))), uint32(v), 0)
if p.To.Sym != nil {
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
rel.Sym = p.To.Sym
case 12: /* movb r,r (extsb); movw r,r (extsw) */
if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
ctxt.Diag("literal operation on R0\n%v", p)
}
}
case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- d = vregoff(ctxt, &p.From3)
+ d := vregoff(ctxt, &p.From3)
+ var mask [2]uint8
maskgen64(ctxt, p, mask[:], uint64(d))
+ var a int
switch p.As {
case ARLDCL,
ARLDCLCC:
case 17, /* bc bo,bi,lbra (same for now) */
16: /* bc bo,bi,sbra */
- a = 0
+ a := 0
if p.From.Type == obj.TYPE_CONST {
a = int(regoff(ctxt, &p.From))
}
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = 0
}
- v = 0
+ v := int32(0)
if p.Pcond != nil {
v = int32(p.Pcond.Pc - p.Pc)
}
o1 = OP_BC(uint32(opirr(ctxt, int(p.As))), uint32(a), uint32(r), uint32(v), 0)
case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
+ var v int32
if p.As == ABC || p.As == ABCL {
v = regoff(ctxt, &p.To) & 31
} else {
v = 20 /* unconditional */
}
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = 0
}
o2 = OP_BCR(o2, uint32(v), uint32(r))
case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
+ var v int32
if p.As == ABC || p.As == ABCL {
v = regoff(ctxt, &p.From) & 31
} else {
v = 20 /* unconditional */
}
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = 0
}
o1 = OP_BCR(o1, uint32(v), uint32(r))
case 19: /* mov $lcon,r ==> cau+or */
- d = vregoff(ctxt, &p.From)
+ d := vregoff(ctxt, &p.From)
if p.From.Sym == nil {
o1 = loadu32(int(p.To.Reg), d)
//if(dlm) reloc(&p->from, p->pc, 0);
case 20: /* add $ucon,,r */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
if p.To.Reg == REGTMP || p.Reg == REGTMP {
ctxt.Diag("cant synthesize large constant\n%v", p)
}
- d = vregoff(ctxt, &p.From)
+ d := vregoff(ctxt, &p.From)
o1 = loadu32(REGTMP, d)
o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
if p.To.Reg == REGTMP || p.Reg == REGTMP {
ctxt.Diag("cant synthesize large constant\n%v", p)
}
- d = vregoff(ctxt, &p.From)
+ d := vregoff(ctxt, &p.From)
o1 = loadu32(REGTMP, d)
o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
/*24*/
case 25:
/* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
if v < 0 {
v = 0
} else if v > 63 {
v = 63
}
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
+ var a int
switch p.As {
case ASLD,
ASLDCC:
if p.To.Reg == REGTMP {
ctxt.Diag("can't synthesize large constant\n%v", p)
}
- v = regoff(ctxt, &p.From)
- r = int(p.From.Reg)
+ v := regoff(ctxt, &p.From)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
- v = regoff(ctxt, &p.From3)
+ v := regoff(ctxt, &p.From3)
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
ctxt.Diag("can't synthesize large constant\n%v", p)
}
- v = regoff(ctxt, &p.From3)
+ v := regoff(ctxt, &p.From3)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
o3 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
//if(dlm) reloc(&p->from3, p->pc, 0);
case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- d = vregoff(ctxt, &p.From3)
+ d := vregoff(ctxt, &p.From3)
+ var mask [2]uint8
maskgen64(ctxt, p, mask[:], uint64(d))
+ var a int
switch p.As {
case ARLDC,
ARLDCCC:
}
case 30: /* rldimi $sh,s,$mask,a */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- d = vregoff(ctxt, &p.From3)
+ d := vregoff(ctxt, &p.From3)
+ var mask [2]uint8
maskgen64(ctxt, p, mask[:], uint64(d))
if int32(mask[1]) != (63 - v) {
ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
}
case 31: /* dword */
- d = vregoff(ctxt, &p.From)
+ d := vregoff(ctxt, &p.From)
if ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = uint32(d >> 32)
}
if p.From.Sym != nil {
- rel = obj.Addrel(ctxt.Cursym)
+ rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 8
rel.Sym = p.From.Sym
}
case 32: /* fmul frc,fra,frd */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
case 33: /* fabs [frb,]frd; fmr. frb,frd */
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if oclass(&p.From) == C_NONE {
r = int(p.To.Reg)
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.From3.Reg)&31)<<6
case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
- v = regoff(ctxt, &p.To)
+ v := regoff(ctxt, &p.To)
- r = int(p.To.Reg)
+ r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
o2 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), REGTMP, uint32(v))
case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(regoff(ctxt, &p.From3))&0x7F)<<11
case 43: /* unary indexed source: dcbf (b); dcbf (a+b) */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = 0
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, uint32(r), uint32(p.From.Reg))
case 44: /* indexed store */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = 0
o1 = AOP_RRR(uint32(opstorex(ctxt, int(p.As))), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
case 45: /* indexed load */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = 0
o1 = uint32(oprrr(ctxt, int(p.As)))
case 47: /* op Ra, Rd; also op [Ra,] Rd */
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(p.To.Reg)
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0)
case 48: /* op Rs, Ra */
- r = int(p.From.Reg)
+ r := int(p.From.Reg)
if r == 0 {
r = int(p.To.Reg)
case 49: /* op Rb; op $n, Rb */
if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
- v = regoff(ctxt, &p.From) & 1
+ v := regoff(ctxt, &p.From) & 1
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
} else {
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, 0, uint32(p.From.Reg))
}
case 50: /* rem[u] r1[,r2],r3 */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- v = oprrr(ctxt, int(p.As))
- t = v & (1<<10 | 1) /* OE|Rc */
+ v := oprrr(ctxt, int(p.As))
+ t := v & (1<<10 | 1) /* OE|Rc */
o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg))
o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
o3 = AOP_RRR(OP_SUBF|uint32(t), uint32(p.To.Reg), REGTMP, uint32(r))
}
case 51: /* remd[u] r1[,r2],r3 */
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- v = oprrr(ctxt, int(p.As))
- t = v & (1<<10 | 1) /* OE|Rc */
+ v := oprrr(ctxt, int(p.As))
+ t := v & (1<<10 | 1) /* OE|Rc */
o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg))
o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
o3 = AOP_RRR(OP_SUBF|uint32(t), uint32(p.To.Reg), REGTMP, uint32(r))
case 52: /* mtfsbNx cr(n) */
- v = regoff(ctxt, &p.From) & 31
+ v := regoff(ctxt, &p.From) & 31
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(v), 0, 0)
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), 0, uint32(p.From.Reg))
case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
}
case 57: /* slw $sh,[s,]a -> rlwinm ... */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
} else if v > 32 {
v = 32
}
+ var mask [2]uint8
if p.As == ASRW || p.As == ASRWCC { /* shift right */
mask[0] = uint8(v)
mask[1] = 31
}
case 58: /* logical $andcon,[s],a */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
o1 = LOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
case 59: /* or/and $ucon,,r */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
- r = int(p.Reg)
+ r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
o1 = LOP_IRR(uint32(opirr(ctxt, int(p.As)+ALAST)), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis */
case 60: /* tw to,a,b */
- r = int(regoff(ctxt, &p.From) & 31)
+ r := int(regoff(ctxt, &p.From) & 31)
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
case 61: /* tw to,a,$simm */
- r = int(regoff(ctxt, &p.From) & 31)
+ r := int(regoff(ctxt, &p.From) & 31)
- v = regoff(ctxt, &p.To)
+ v := regoff(ctxt, &p.To)
o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.Reg), uint32(v))
case 62: /* rlwmi $sh,s,$mask,a */
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
+ var mask [2]uint8
maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, &p.From3)))
o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
case 63: /* rlwmi b,s,$mask,a */
+ var mask [2]uint8
maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, &p.From3)))
o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
case 64: /* mtfsf fr[, $m] {,fpcsr} */
+ var v int32
if p.From3.Type != obj.TYPE_NONE {
v = regoff(ctxt, &p.From3) & 255
} else {
o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(regoff(ctxt, &p.From))&31)<<12
case 66: /* mov spr,r1; mov r1,spr, also dcr */
+ var r int
+ var v int32
if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
r = int(p.From.Reg)
v = int32(p.To.Reg)
case 68: /* mfcr rD; mfocrf CRM,rD */
if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
- v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
+ v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
} else {
o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
}
case 69: /* mtcrf CRM,rS */
+ var v int32
if p.From3.Type != obj.TYPE_NONE {
if p.To.Reg != 0 {
ctxt.Diag("can't use both mask and CR(n)\n%v", p)
o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
case 70: /* [f]cmp r,r,cr*/
+ var r int
if p.Reg == 0 {
r = 0
} else {
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
case 71: /* cmp[l] r,i,cr*/
+ var r int
if p.Reg == 0 {
r = 0
} else {
/* relocation operations */
case 74:
- v = regoff(ctxt, &p.To)
+ v := regoff(ctxt, &p.To)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
o2 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), REGTMP, uint32(v))
//if(dlm) reloc(&p->to, p->pc, 1);
case 75:
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
addaddrreloc(ctxt, p.From.Sym, &o1, &o2)
//if(dlm) reloc(&p->from, p->pc, 1);
case 76:
- v = regoff(ctxt, &p.From)
+ v := regoff(ctxt, &p.From)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
addaddrreloc(ctxt, p.From.Sym, &o1, &o2)
var bigP *obj.Prog
func Pconv(p *obj.Prog) string {
- var str string
- var fp string
-
- var a int
-
- a = int(p.As)
+ a := int(p.As)
- str = ""
+ str := ""
if a == obj.ADATA {
str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v",
p.Pc, p.Line(), Aconv(a), obj.Dconv(p, &p.From), p.From3.Offset, obj.Dconv(p, &p.To))
}
if p.Spadj != 0 {
+ var fp string
fp += fmt.Sprintf("%s # spadj=%d", str, p.Spadj)
return fp
}
}
+ var fp string
fp += str
return fp
}
+ // Aconv returns the assembler mnemonic (from Anames) for opcode a,
+ // or the placeholder "???" when a lies outside [obj.AXXX, ALAST).
func Aconv(a int) string {
- var s string
- var fp string
-
- s = "???"
+ s := "???"
if a >= obj.AXXX && a < ALAST {
s = Anames[a]
}
+ // NOTE(review): fp is a redundant copy of s; this looks like
+ // mechanically translated code — confirm before simplifying.
+ var fp string
fp += s
return fp
}
}
+ // DRconv returns the operand-class name (from cnames9) for class a,
+ // or the placeholder "C_??" when a lies outside [C_NONE, C_NCLASS].
func DRconv(a int) string {
- var s string
- var fp string
-
- s = "C_??"
+ s := "C_??"
if a >= C_NONE && a <= C_NCLASS {
s = cnames9[a]
}
+ // NOTE(review): fp is a redundant copy of s; this looks like
+ // mechanically translated code — confirm before simplifying.
+ var fp string
fp += s
return fp
}
)
func progedit(ctxt *obj.Link, p *obj.Prog) {
- var literal string
- var s *obj.LSym
-
p.From.Class = 0
p.To.Class = 0
switch p.As {
case AFMOVS:
if p.From.Type == obj.TYPE_FCONST {
- var i32 uint32
- var f32 float32
- f32 = float32(p.From.U.Dval)
- i32 = math.Float32bits(f32)
- literal = fmt.Sprintf("$f32.%08x", i32)
- s = obj.Linklookup(ctxt, literal, 0)
+ f32 := float32(p.From.U.Dval)
+ i32 := math.Float32bits(f32)
+ literal := fmt.Sprintf("$f32.%08x", i32)
+ s := obj.Linklookup(ctxt, literal, 0)
s.Size = 4
p.From.Type = obj.TYPE_MEM
p.From.Sym = s
case AFMOVD:
if p.From.Type == obj.TYPE_FCONST {
- var i64 uint64
- i64 = math.Float64bits(p.From.U.Dval)
- literal = fmt.Sprintf("$f64.%016x", i64)
- s = obj.Linklookup(ctxt, literal, 0)
+ i64 := math.Float64bits(p.From.U.Dval)
+ literal := fmt.Sprintf("$f64.%016x", i64)
+ s := obj.Linklookup(ctxt, literal, 0)
s.Size = 8
p.From.Type = obj.TYPE_MEM
p.From.Sym = s
// Put >32-bit constants in memory and load them
case AMOVD:
if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && int64(int32(p.From.Offset)) != p.From.Offset {
- literal = fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
- s = obj.Linklookup(ctxt, literal, 0)
+ literal := fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
+ s := obj.Linklookup(ctxt, literal, 0)
s.Size = 8
p.From.Type = obj.TYPE_MEM
p.From.Sym = s
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
- var p *obj.Prog
- var q *obj.Prog
- var p1 *obj.Prog
- var p2 *obj.Prog
- var q1 *obj.Prog
- var o int
- var mov int
- var aoffset int
- var textstksiz int64
- var autosize int32
-
if ctxt.Symmorestack[0] == nil {
ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
return
}
- p = cursym.Text
- textstksiz = p.To.Offset
+ p := cursym.Text
+ textstksiz := p.To.Offset
cursym.Args = p.To.U.Argsize
cursym.Locals = int32(textstksiz)
}
obj.Bflush(ctxt.Bso)
- q = nil
- for p = cursym.Text; p != nil; p = p.Link {
+ q := (*obj.Prog)(nil)
+ var q1 *obj.Prog
+ for p := cursym.Text; p != nil; p = p.Link {
switch p.As {
/* too hard, just leave alone */
case obj.ATEXT:
}
}
- autosize = 0
- for p = cursym.Text; p != nil; p = p.Link {
+ autosize := int32(0)
+ var aoffset int
+ var mov int
+ var o int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ for p := cursym.Text; p != nil; p = p.Link {
o = int(p.As)
switch o {
case obj.ATEXT:
}
*/
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
- var q *obj.Prog
- var q1 *obj.Prog
-
// MOVD g_stackguard(g), R3
p = obj.Appendp(ctxt, p)
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R3
- q = nil
+ q := (*obj.Prog)(nil)
if framesize <= obj.StackSmall {
// small stack: SP < stackguard
// CMP stackguard, SP
// q1: BLT done
p = obj.Appendp(ctxt, p)
- q1 = p
+ q1 := p
p.As = ABLT
p.To.Type = obj.TYPE_BRANCH
}
+ // follow rebuilds s's instruction list in fall-through execution order.
+ // A fresh Prog serves as a placeholder list head; xfol appends the
+ // reordered instructions through lastp, the list is then terminated,
+ // and the chain minus the placeholder head becomes the new s.Text.
func follow(ctxt *obj.Link, s *obj.LSym) {
- var firstp *obj.Prog
- var lastp *obj.Prog
-
ctxt.Cursym = s
- firstp = ctxt.NewProg()
- lastp = firstp
+ firstp := ctxt.NewProg()
+ lastp := firstp
xfol(ctxt, s.Text, &lastp)
+ // Terminate the rebuilt list and drop the placeholder head.
lastp.Link = nil
s.Text = firstp.Link
}
func Linknew(arch *LinkArch) *Link {
- var buf string
-
linksetexp()
ctxt := new(Link)
ctxt.Windows = 1
}
+ var buf string
buf, _ = os.Getwd()
if buf == "" {
buf = "/???"
}
func span6(ctxt *obj.Link, s *obj.LSym) {
- var p *obj.Prog
- var q *obj.Prog
- var c int32
- var v int32
- var loop int32
- var bp []byte
- var n int
- var m int
- var i int
-
ctxt.Cursym = s
if s.P != nil {
instinit()
}
- for p = ctxt.Cursym.Text; p != nil; p = p.Link {
+ var v int32
+ for p := ctxt.Cursym.Text; p != nil; p = p.Link {
if p.To.Type == obj.TYPE_BRANCH {
if p.Pcond == nil {
p.Pcond = p
}
}
- for p = s.Text; p != nil; p = p.Link {
+ var q *obj.Prog
+ for p := s.Text; p != nil; p = p.Link {
p.Back = 2 // use short branches first time through
q = p.Pcond
if q != nil && (q.Back&2 != 0) {
}
}
- n = 0
+ n := 0
+ var bp []byte
+ var c int32
+ var i int
+ var loop int32
+ var m int
+ var p *obj.Prog
for {
loop = 0
for i = 0; i < len(s.R); i++ {
if false { /* debug['a'] > 1 */
fmt.Printf("span1 %s %d (%d tries)\n %.6x", s.Name, s.Size, n, 0)
+ var i int
for i = 0; i < len(s.P); i++ {
fmt.Printf(" %.2x", s.P[i])
if i%16 == 15 {
fmt.Printf("\n")
}
- for i = 0; i < len(s.R); i++ {
- var r *obj.Reloc
-
- r = &s.R[i]
+ for i := 0; i < len(s.R); i++ {
+ r := &s.R[i]
fmt.Printf(" rel %#.4x/%d %s%+d\n", uint32(r.Off), r.Siz, r.Sym.Name, r.Add)
}
}
func instinit() {
var c int
- var i int
- for i = 1; optab[i].as != 0; i++ {
+ for i := 1; optab[i].as != 0; i++ {
c = int(optab[i].as)
if opindex[c] != nil {
log.Fatalf("phase error in optab: %d (%v)", i, Aconv(c))
opindex[c] = &optab[i]
}
- for i = 0; i < Ymax; i++ {
+ for i := 0; i < Ymax; i++ {
ycover[i*Ymax+i] = 1
}
ycover[Ym*Ymax+Yxm] = 1
ycover[Yxr*Ymax+Yxm] = 1
- for i = 0; i < MAXREG; i++ {
+ for i := 0; i < MAXREG; i++ {
reg[i] = -1
if i >= REG_AL && i <= REG_R15B {
reg[i] = (i - REG_AL) & 7
}
func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int {
- var v int64
- var l int32
-
// TODO(rsc): This special case is for SHRQ $3, AX:DX,
// which encodes as SHRQ $32(DX*0), AX.
// Similarly SHRQ CX, AX:DX is really SHRQ CX(DX*0), AX.
ctxt.Diag("TYPE_CONST with symbol: %v", obj.Dconv(p, a))
}
- v = a.Offset
+ v := a.Offset
if v == 0 {
return Yi0
}
if v >= -128 && v <= 127 {
return Yi8
}
- l = int32(v)
+ l := int32(v)
if int64(l) == v {
return Ys32 /* can sign extend */
}
}
func relput4(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
- var v int64
var rel obj.Reloc
- var r *obj.Reloc
- v = vaddr(ctxt, p, a, &rel)
+ v := vaddr(ctxt, p, a, &rel)
if rel.Siz != 0 {
if rel.Siz != 4 {
ctxt.Diag("bad reloc")
}
- r = obj.Addrel(ctxt.Cursym)
+ r := obj.Addrel(ctxt.Cursym)
*r = rel
r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
}
}
*/
func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int64 {
- var s *obj.LSym
-
if r != nil {
*r = obj.Reloc{}
}
switch a.Name {
case obj.NAME_STATIC,
obj.NAME_EXTERN:
- s = a.Sym
+ s := a.Sym
if r == nil {
ctxt.Diag("need reloc for %v", obj.Dconv(p, a))
log.Fatalf("reloc")
}
func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int) {
- var v int32
var base int
var rel obj.Reloc
rex &= 0x40 | Rxr
- v = int32(a.Offset)
+ v := int32(a.Offset)
rel.Siz = 0
switch a.Type {
}
if a.Index != REG_NONE && a.Index != REG_TLS {
- base = int(a.Reg)
+ base := int(a.Reg)
switch a.Name {
case obj.NAME_EXTERN,
obj.NAME_STATIC:
putrelv:
if rel.Siz != 0 {
- var r *obj.Reloc
-
if rel.Siz != 4 {
ctxt.Diag("bad rel")
goto bad
}
- r = obj.Addrel(ctxt.Cursym)
+ r := obj.Addrel(ctxt.Cursym)
*r = rel
r.Off = int32(ctxt.Curp.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
}
}
func doasm(ctxt *obj.Link, p *obj.Prog) {
- var o *Optab
- var q *obj.Prog
- var pp obj.Prog
- var t []byte
- var mo []Movtab
- var z int
- var op int
- var ft int
- var tt int
- var xo int
- var l int
- var pre int
- var v int64
- var rel obj.Reloc
- var r *obj.Reloc
- var a *obj.Addr
- var yt ytab
-
ctxt.Curp = p // TODO
- o = opindex[p.As]
+ o := opindex[p.As]
if o == nil {
ctxt.Diag("asmins: missing op %v", p)
return
}
- pre = prefixof(ctxt, &p.From)
+ pre := prefixof(ctxt, &p.From)
if pre != 0 {
ctxt.Andptr[0] = byte(pre)
ctxt.Andptr = ctxt.Andptr[1:]
p.Tt = uint8(oclass(ctxt, p, &p.To))
}
- ft = int(p.Ft) * Ymax
- tt = int(p.Tt) * Ymax
+ ft := int(p.Ft) * Ymax
+ tt := int(p.Tt) * Ymax
- xo = bool2int(o.op[0] == 0x0f)
- z = 0
+ xo := bool2int(o.op[0] == 0x0f)
+ z := 0
+ var a *obj.Addr
+ var l int
+ var op int
+ var q *obj.Prog
+ var r *obj.Reloc
+ var rel obj.Reloc
+ var v int64
+ var yt ytab
for _, yt = range o.ytab {
if ycover[ft+int(yt.from)] != 0 && ycover[tt+int(yt.to)] != 0 {
- goto found
- }
- z += int(yt.zoffset) + xo
- }
- goto domov
-
-found:
- switch o.prefix {
- case Pq: /* 16 bit escape and opcode escape */
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Pq3: /* 16 bit escape, Rex.w, and opcode escape */
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = Pw
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Pf2, /* xmm opcode escape */
- Pf3:
- ctxt.Andptr[0] = byte(o.prefix)
- ctxt.Andptr = ctxt.Andptr[1:]
+ switch o.prefix {
+ case Pq: /* 16 bit escape and opcode escape */
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Pm: /* opcode escape */
- ctxt.Andptr[0] = Pm
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Pq3: /* 16 bit escape, Rex.w, and opcode escape */
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Pe: /* 16 bit escape */
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = Pw
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Pw: /* 64-bit escape */
- if p.Mode != 64 {
- ctxt.Diag("asmins: illegal 64: %v", p)
- }
- ctxt.Rexflag |= Pw
+ case Pf2, /* xmm opcode escape */
+ Pf3:
+ ctxt.Andptr[0] = byte(o.prefix)
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Pb: /* botch */
- bytereg(&p.From, &p.Ft)
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
- bytereg(&p.To, &p.Tt)
+ case Pm: /* opcode escape */
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
- case P32: /* 32 bit but illegal if 64-bit mode */
- if p.Mode == 64 {
- ctxt.Diag("asmins: illegal in 64-bit mode: %v", p)
- }
+ case Pe: /* 16 bit escape */
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Py: /* 64-bit only, no prefix */
- if p.Mode != 64 {
- ctxt.Diag("asmins: illegal in %d-bit mode: %v", p.Mode, p)
- }
- }
+ case Pw: /* 64-bit escape */
+ if p.Mode != 64 {
+ ctxt.Diag("asmins: illegal 64: %v", p)
+ }
+ ctxt.Rexflag |= Pw
- if z >= len(o.op) {
- log.Fatalf("asmins bad table %v", p)
- }
- op = int(o.op[z])
- if op == 0x0f {
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- z++
- op = int(o.op[z])
- }
+ case Pb: /* botch */
+ bytereg(&p.From, &p.Ft)
- switch yt.zcase {
- default:
- ctxt.Diag("asmins: unknown z %d %v", yt.zcase, p)
- return
+ bytereg(&p.To, &p.Tt)
- case Zpseudo:
- break
+ case P32: /* 32 bit but illegal if 64-bit mode */
+ if p.Mode == 64 {
+ ctxt.Diag("asmins: illegal in 64-bit mode: %v", p)
+ }
- case Zlit:
- for ; ; z++ {
- op = int(o.op[z])
- if op == 0 {
- break
+ case Py: /* 64-bit only, no prefix */
+ if p.Mode != 64 {
+ ctxt.Diag("asmins: illegal in %d-bit mode: %v", p.Mode, p)
+ }
}
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- case Zlitm_r:
- for ; ; z++ {
+ if z >= len(o.op) {
+ log.Fatalf("asmins bad table %v", p)
+ }
op = int(o.op[z])
- if op == 0 {
- break
+ if op == 0x0f {
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ z++
+ op = int(o.op[z])
}
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- asmand(ctxt, p, &p.From, &p.To)
-
- case Zmb_r:
- bytereg(&p.From, &p.Ft)
- fallthrough
-
- /* fall through */
- case Zm_r:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmand(ctxt, p, &p.From, &p.To)
- case Zm2_r:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, &p.To)
-
- case Zm_r_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.From, &p.To)
-
- case Zm_r_xm_nr:
- ctxt.Rexflag = 0
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.From, &p.To)
-
- case Zm_r_i_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.From, &p.To)
- ctxt.Andptr[0] = byte(p.To.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case Zm_r_3d:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.From, &p.To)
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
+ switch yt.zcase {
+ default:
+ ctxt.Diag("asmins: unknown z %d %v", yt.zcase, p)
+ return
- case Zibm_r:
- for {
- tmp1 := z
- z++
- op = int(o.op[tmp1])
- if op == 0 {
+ case Zpseudo:
break
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- asmand(ctxt, p, &p.From, &p.To)
- ctxt.Andptr[0] = byte(p.To.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
- case Zaut_r:
- ctxt.Andptr[0] = 0x8d
- ctxt.Andptr = ctxt.Andptr[1:] /* leal */
- if p.From.Type != obj.TYPE_ADDR {
- ctxt.Diag("asmins: Zaut sb type ADDR")
- }
- p.From.Type = obj.TYPE_MEM
- asmand(ctxt, p, &p.From, &p.To)
- p.From.Type = obj.TYPE_ADDR
+ case Zlit:
+ for ; ; z++ {
+ op = int(o.op[z])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
- case Zm_o:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmando(ctxt, p, &p.From, int(o.op[z+1]))
+ case Zlitm_r:
+ for ; ; z++ {
+ op = int(o.op[z])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ asmand(ctxt, p, &p.From, &p.To)
- case Zr_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, &p.From)
+ case Zmb_r:
+ bytereg(&p.From, &p.Ft)
+ fallthrough
- case Zr_m_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.To, &p.From)
+ /* fall through */
+ case Zm_r:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zr_m_xm_nr:
- ctxt.Rexflag = 0
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.To, &p.From)
+ asmand(ctxt, p, &p.From, &p.To)
- case Zr_m_i_xm:
- mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmand(ctxt, p, &p.To, &p.From)
- ctxt.Andptr[0] = byte(p.From.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zm2_r:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, &p.To)
- case Zo_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmando(ctxt, p, &p.To, int(o.op[z+1]))
+ case Zm_r_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.From, &p.To)
- case Zcallindreg:
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc)
- r.Type = obj.R_CALLIND
- r.Siz = 0
- fallthrough
+ case Zm_r_xm_nr:
+ ctxt.Rexflag = 0
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.From, &p.To)
- // fallthrough
- case Zo_m64:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zm_r_i_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.From, &p.To)
+ ctxt.Andptr[0] = byte(p.To.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
- asmandsz(ctxt, p, &p.To, int(o.op[z+1]), 0, 1)
+ case Zm_r_3d:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, &p.To)
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zm_ibo:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmando(ctxt, p, &p.From, int(o.op[z+1]))
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.To, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zibm_r:
+ for {
+ tmp1 := z
+ z++
+ op = int(o.op[tmp1])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ asmand(ctxt, p, &p.From, &p.To)
+ ctxt.Andptr[0] = byte(p.To.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zibo_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmando(ctxt, p, &p.To, int(o.op[z+1]))
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zaut_r:
+ ctxt.Andptr[0] = 0x8d
+ ctxt.Andptr = ctxt.Andptr[1:] /* leal */
+ if p.From.Type != obj.TYPE_ADDR {
+ ctxt.Diag("asmins: Zaut sb type ADDR")
+ }
+ p.From.Type = obj.TYPE_MEM
+ asmand(ctxt, p, &p.From, &p.To)
+ p.From.Type = obj.TYPE_ADDR
- case Zibo_m_xm:
- z = mediaop(ctxt, o, op, int(yt.zoffset), z)
- asmando(ctxt, p, &p.To, int(o.op[z+1]))
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zm_o:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmando(ctxt, p, &p.From, int(o.op[z+1]))
- case Z_ib,
- Zib_:
- if yt.zcase == Zib_ {
- a = &p.From
- } else {
- a = &p.To
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, a, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zr_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, &p.From)
- case Zib_rp:
- ctxt.Rexflag |= regrex[p.To.Reg] & (Rxb | 0x40)
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Zr_m_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.To, &p.From)
- case Zil_rp:
- ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
- if o.prefix == Pe {
- v = vaddr(ctxt, p, &p.From, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, &p.From)
- }
+ case Zr_m_xm_nr:
+ ctxt.Rexflag = 0
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.To, &p.From)
- case Zo_iw:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- if p.From.Type != obj.TYPE_NONE {
- v = vaddr(ctxt, p, &p.From, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- }
+ case Zr_m_i_xm:
+ mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmand(ctxt, p, &p.To, &p.From)
+ ctxt.Andptr[0] = byte(p.From.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Ziq_rp:
- v = vaddr(ctxt, p, &p.From, &rel)
- l = int(v >> 32)
- if l == 0 && rel.Siz != 8 {
- //p->mark |= 0100;
- //print("zero: %llux %P\n", v, p);
- ctxt.Rexflag &^= (0x40 | Rxw)
+ case Zo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmando(ctxt, p, &p.To, int(o.op[z+1]))
- ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
- ctxt.Andptr[0] = byte(0xb8 + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
- if rel.Type != 0 {
+ case Zcallindreg:
r = obj.Addrel(ctxt.Cursym)
- *r = rel
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- }
+ r.Off = int32(p.Pc)
+ r.Type = obj.R_CALLIND
+ r.Siz = 0
+ fallthrough
- put4(ctxt, int32(v))
- } else if l == -1 && uint64(v)&(uint64(1)<<31) != 0 { /* sign extend */
+ // fallthrough
+ case Zo_m64:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
- //p->mark |= 0100;
- //print("sign: %llux %P\n", v, p);
- ctxt.Andptr[0] = 0xc7
- ctxt.Andptr = ctxt.Andptr[1:]
+ asmandsz(ctxt, p, &p.To, int(o.op[z+1]), 0, 1)
- asmando(ctxt, p, &p.To, 0)
- put4(ctxt, int32(v)) /* need all 8 */
- } else {
- //print("all: %llux %P\n", v, p);
- ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
+ case Zm_ibo:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmando(ctxt, p, &p.From, int(o.op[z+1]))
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.To, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
- if rel.Type != 0 {
- r = obj.Addrel(ctxt.Cursym)
- *r = rel
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- }
+ case Zibo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmando(ctxt, p, &p.To, int(o.op[z+1]))
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
- put8(ctxt, v)
- }
+ case Zibo_m_xm:
+ z = mediaop(ctxt, o, op, int(yt.zoffset), z)
+ asmando(ctxt, p, &p.To, int(o.op[z+1]))
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zib_rr:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, &p.To)
- ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
- ctxt.Andptr = ctxt.Andptr[1:]
+ case Z_ib,
+ Zib_:
+ if yt.zcase == Zib_ {
+ a = &p.From
+ } else {
+ a = &p.To
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, a, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Z_il,
- Zil_:
- if yt.zcase == Zil_ {
- a = &p.From
- } else {
- a = &p.To
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- if o.prefix == Pe {
- v = vaddr(ctxt, p, a, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, a)
- }
+ case Zib_rp:
+ ctxt.Rexflag |= regrex[p.To.Reg] & (Rxb | 0x40)
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zm_ilo,
- Zilo_m:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- if yt.zcase == Zilo_m {
- a = &p.From
- asmando(ctxt, p, &p.To, int(o.op[z+1]))
- } else {
- a = &p.To
- asmando(ctxt, p, &p.From, int(o.op[z+1]))
- }
+ case Zil_rp:
+ ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, &p.From, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, &p.From)
+ }
- if o.prefix == Pe {
- v = vaddr(ctxt, p, a, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, a)
- }
+ case Zo_iw:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if p.From.Type != obj.TYPE_NONE {
+ v = vaddr(ctxt, p, &p.From, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
- case Zil_rr:
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, &p.To)
- if o.prefix == Pe {
- v = vaddr(ctxt, p, &p.From, nil)
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else {
- relput4(ctxt, p, &p.From)
- }
+ case Ziq_rp:
+ v = vaddr(ctxt, p, &p.From, &rel)
+ l = int(v >> 32)
+ if l == 0 && rel.Siz != 8 {
+ //p->mark |= 0100;
+ //print("zero: %llux %P\n", v, p);
+ ctxt.Rexflag &^= (0x40 | Rxw)
- case Z_rp:
- ctxt.Rexflag |= regrex[p.To.Reg] & (Rxb | 0x40)
- ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
+ ctxt.Andptr[0] = byte(0xb8 + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if rel.Type != 0 {
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
- case Zrp_:
- ctxt.Rexflag |= regrex[p.From.Reg] & (Rxb | 0x40)
- ctxt.Andptr[0] = byte(op + reg[p.From.Reg])
- ctxt.Andptr = ctxt.Andptr[1:]
+ put4(ctxt, int32(v))
+ } else if l == -1 && uint64(v)&(uint64(1)<<31) != 0 { /* sign extend */
- case Zclr:
- ctxt.Rexflag &^= Pw
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &p.To, &p.To)
+ //p->mark |= 0100;
+ //print("sign: %llux %P\n", v, p);
+ ctxt.Andptr[0] = 0xc7
+ ctxt.Andptr = ctxt.Andptr[1:]
- case Zcall:
- if p.To.Sym == nil {
- ctxt.Diag("call without target")
- log.Fatalf("bad code")
- }
+ asmando(ctxt, p, &p.To, 0)
+ put4(ctxt, int32(v)) /* need all 8 */
+ } else {
+ //print("all: %llux %P\n", v, p);
+ ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- r.Sym = p.To.Sym
- r.Add = p.To.Offset
- r.Type = obj.R_CALL
- r.Siz = 4
- put4(ctxt, 0)
-
- // TODO: jump across functions needs reloc
- case Zbr,
- Zjmp,
- Zloop:
- if p.To.Sym != nil {
- if yt.zcase != Zjmp {
- ctxt.Diag("branch to ATEXT")
- log.Fatalf("bad code")
- }
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if rel.Type != 0 {
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- r = obj.Addrel(ctxt.Cursym)
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- r.Sym = p.To.Sym
- r.Type = obj.R_PCREL
- r.Siz = 4
- put4(ctxt, 0)
- break
- }
+ put8(ctxt, v)
+ }
- // Assumes q is in this function.
- // TODO: Check in input, preserve in brchain.
+ case Zib_rr:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, &p.To)
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
- // Fill in backward jump now.
- q = p.Pcond
+ case Z_il,
+ Zil_:
+ if yt.zcase == Zil_ {
+ a = &p.From
+ } else {
+ a = &p.To
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, a)
+ }
- if q == nil {
- ctxt.Diag("jmp/branch/loop without target")
- log.Fatalf("bad code")
- }
+ case Zm_ilo,
+ Zilo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if yt.zcase == Zilo_m {
+ a = &p.From
+ asmando(ctxt, p, &p.To, int(o.op[z+1]))
+ } else {
+ a = &p.To
+ asmando(ctxt, p, &p.From, int(o.op[z+1]))
+ }
- if p.Back&1 != 0 {
- v = q.Pc - (p.Pc + 2)
- if v >= -128 {
- if p.As == AJCXZL {
- ctxt.Andptr[0] = 0x67
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, a)
}
+
+ case Zil_rr:
ctxt.Andptr[0] = byte(op)
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- } else if yt.zcase == Zloop {
- ctxt.Diag("loop too far: %v", p)
- } else {
- v -= 5 - 2
- if yt.zcase == Zbr {
- ctxt.Andptr[0] = 0x0f
+ asmand(ctxt, p, &p.To, &p.To)
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, &p.From, nil)
+ ctxt.Andptr[0] = byte(v)
ctxt.Andptr = ctxt.Andptr[1:]
- v--
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, &p.From)
}
- ctxt.Andptr[0] = byte(o.op[z+1])
+ case Z_rp:
+ ctxt.Rexflag |= regrex[p.To.Reg] & (Rxb | 0x40)
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 8)
+
+ case Zrp_:
+ ctxt.Rexflag |= regrex[p.From.Reg] & (Rxb | 0x40)
+ ctxt.Andptr[0] = byte(op + reg[p.From.Reg])
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 16)
+
+ case Zclr:
+ ctxt.Rexflag &^= Pw
+ ctxt.Andptr[0] = byte(op)
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 24)
+ asmand(ctxt, p, &p.To, &p.To)
+
+ case Zcall:
+ if p.To.Sym == nil {
+ ctxt.Diag("call without target")
+ log.Fatalf("bad code")
+ }
+
+ ctxt.Andptr[0] = byte(op)
ctxt.Andptr = ctxt.Andptr[1:]
- }
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Sym = p.To.Sym
+ r.Add = p.To.Offset
+ r.Type = obj.R_CALL
+ r.Siz = 4
+ put4(ctxt, 0)
+
+ // TODO: jump across functions needs reloc
+ case Zbr,
+ Zjmp,
+ Zloop:
+ if p.To.Sym != nil {
+ if yt.zcase != Zjmp {
+ ctxt.Diag("branch to ATEXT")
+ log.Fatalf("bad code")
+ }
- break
- }
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Sym = p.To.Sym
+ r.Type = obj.R_PCREL
+ r.Siz = 4
+ put4(ctxt, 0)
+ break
+ }
- // Annotate target; will fill in later.
- p.Forwd = q.Comefrom
+ // Assumes q is in this function.
+ // TODO: Check in input, preserve in brchain.
- q.Comefrom = p
- if p.Back&2 != 0 { // short
- if p.As == AJCXZL {
- ctxt.Andptr[0] = 0x67
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- ctxt.Andptr[0] = byte(op)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- } else if yt.zcase == Zloop {
- ctxt.Diag("loop too far: %v", p)
- } else {
- if yt.zcase == Zbr {
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- ctxt.Andptr[0] = byte(o.op[z+1])
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0
- ctxt.Andptr = ctxt.Andptr[1:]
- }
+ // Fill in backward jump now.
+ q = p.Pcond
- break
+ if q == nil {
+ ctxt.Diag("jmp/branch/loop without target")
+ log.Fatalf("bad code")
+ }
- /*
- v = q->pc - p->pc - 2;
- if((v >= -128 && v <= 127) || p->pc == -1 || q->pc == -1) {
- *ctxt->andptr++ = op;
- *ctxt->andptr++ = v;
- } else {
- v -= 5-2;
- if(yt.zcase == Zbr) {
- *ctxt->andptr++ = 0x0f;
- v--;
- }
- *ctxt->andptr++ = o->op[z+1];
- *ctxt->andptr++ = v;
- *ctxt->andptr++ = v>>8;
- *ctxt->andptr++ = v>>16;
- *ctxt->andptr++ = v>>24;
- }
- */
+ if p.Back&1 != 0 {
+ v = q.Pc - (p.Pc + 2)
+ if v >= -128 {
+ if p.As == AJCXZL {
+ ctxt.Andptr[0] = 0x67
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else if yt.zcase == Zloop {
+ ctxt.Diag("loop too far: %v", p)
+ } else {
+ v -= 5 - 2
+ if yt.zcase == Zbr {
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ v--
+ }
+
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 16)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 24)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
- case Zbyte:
- v = vaddr(ctxt, p, &p.From, &rel)
- if rel.Siz != 0 {
- rel.Siz = uint8(op)
- r = obj.Addrel(ctxt.Cursym)
- *r = rel
- r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
- }
+ break
+ }
- ctxt.Andptr[0] = byte(v)
- ctxt.Andptr = ctxt.Andptr[1:]
- if op > 1 {
- ctxt.Andptr[0] = byte(v >> 8)
- ctxt.Andptr = ctxt.Andptr[1:]
- if op > 2 {
- ctxt.Andptr[0] = byte(v >> 16)
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 24)
- ctxt.Andptr = ctxt.Andptr[1:]
- if op > 4 {
- ctxt.Andptr[0] = byte(v >> 32)
+ // Annotate target; will fill in later.
+ p.Forwd = q.Comefrom
+
+ q.Comefrom = p
+ if p.Back&2 != 0 { // short
+ if p.As == AJCXZL {
+ ctxt.Andptr[0] = 0x67
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 40)
+ } else if yt.zcase == Zloop {
+ ctxt.Diag("loop too far: %v", p)
+ } else {
+ if yt.zcase == Zbr {
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 48)
+ ctxt.Andptr[0] = 0
ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = byte(v >> 56)
+ ctxt.Andptr[0] = 0
ctxt.Andptr = ctxt.Andptr[1:]
}
+
+ break
+
+ /*
+ v = q->pc - p->pc - 2;
+ if((v >= -128 && v <= 127) || p->pc == -1 || q->pc == -1) {
+ *ctxt->andptr++ = op;
+ *ctxt->andptr++ = v;
+ } else {
+ v -= 5-2;
+ if(yt.zcase == Zbr) {
+ *ctxt->andptr++ = 0x0f;
+ v--;
+ }
+ *ctxt->andptr++ = o->op[z+1];
+ *ctxt->andptr++ = v;
+ *ctxt->andptr++ = v>>8;
+ *ctxt->andptr++ = v>>16;
+ *ctxt->andptr++ = v>>24;
+ }
+ */
+
+ case Zbyte:
+ v = vaddr(ctxt, p, &p.From, &rel)
+ if rel.Siz != 0 {
+ rel.Siz = uint8(op)
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
+
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if op > 1 {
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if op > 2 {
+ ctxt.Andptr[0] = byte(v >> 16)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 24)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if op > 4 {
+ ctxt.Andptr[0] = byte(v >> 32)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 40)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 48)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 56)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ }
+ }
}
+
+ return
}
+ z += int(yt.zoffset) + xo
}
-
- return
-
-domov:
- for mo = ymovtab; mo[0].as != 0; mo = mo[1:] {
+ var pp obj.Prog
+ var t []byte
+ for mo := ymovtab; mo[0].as != 0; mo = mo[1:] {
if p.As == mo[0].as {
if ycover[ft+int(mo[0].ft)] != 0 {
if ycover[tt+int(mo[0].tt)] != 0 {
t = mo[0].op[:]
- goto mfound
+ switch mo[0].code {
+ default:
+ ctxt.Diag("asmins: unknown mov %d %v", mo[0].code, p)
+
+ case 0: /* lit */
+ for z = 0; t[z] != E; z++ {
+ ctxt.Andptr[0] = t[z]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ case 1: /* r,m */
+ ctxt.Andptr[0] = t[0]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmando(ctxt, p, &p.To, int(t[1]))
+
+ case 2: /* m,r */
+ ctxt.Andptr[0] = t[0]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmando(ctxt, p, &p.From, int(t[1]))
+
+ case 3: /* r,m - 2op */
+ ctxt.Andptr[0] = t[0]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = t[1]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmando(ctxt, p, &p.To, int(t[2]))
+ ctxt.Rexflag |= regrex[p.From.Reg] & (Rxr | 0x40)
+
+ case 4: /* m,r - 2op */
+ ctxt.Andptr[0] = t[0]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = t[1]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmando(ctxt, p, &p.From, int(t[2]))
+ ctxt.Rexflag |= regrex[p.To.Reg] & (Rxr | 0x40)
+
+ case 5: /* load full pointer, trash heap */
+ if t[0] != 0 {
+ ctxt.Andptr[0] = t[0]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ switch p.To.Index {
+ default:
+ goto bad
+
+ case REG_DS:
+ ctxt.Andptr[0] = 0xc5
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_SS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb2
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_ES:
+ ctxt.Andptr[0] = 0xc4
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_FS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb4
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_GS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb5
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ asmand(ctxt, p, &p.From, &p.To)
+
+ case 6: /* double shift */
+ if t[0] == Pw {
+ if p.Mode != 64 {
+ ctxt.Diag("asmins: illegal 64: %v", p)
+ }
+ ctxt.Rexflag |= Pw
+ t = t[1:]
+ } else if t[0] == Pe {
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
+ t = t[1:]
+ }
+
+ switch p.From.Type {
+ default:
+ goto bad
+
+ case obj.TYPE_CONST:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = t[0]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
+ ctxt.Andptr[0] = byte(p.From.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case obj.TYPE_REG:
+ switch p.From.Reg {
+ default:
+ goto bad
+
+ case REG_CL,
+ REG_CX:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = t[1]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
+ }
+ }
+
+ // NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
+ // where you load the TLS base register into a register and then index off that
+ // register to access the actual TLS variables. Systems that allow direct TLS access
+ // are handled in prefixof above and should not be listed here.
+ case 7: /* mov tls, r */
+ switch ctxt.Headtype {
+ default:
+ log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype))
+
+ case obj.Hplan9:
+ if ctxt.Plan9privates == nil {
+ ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
+ }
+ pp.From = obj.Addr{}
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Name = obj.NAME_EXTERN
+ pp.From.Sym = ctxt.Plan9privates
+ pp.From.Offset = 0
+ pp.From.Index = REG_NONE
+ ctxt.Rexflag |= Pw
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, &p.To)
+
+ // TLS base is 0(FS).
+ case obj.Hsolaris: // TODO(rsc): Delete Hsolaris from list. Should not use this code. See progedit in obj6.c.
+ pp.From = p.From
+
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Name = obj.NAME_NONE
+ pp.From.Reg = REG_NONE
+ pp.From.Offset = 0
+ pp.From.Index = REG_NONE
+ pp.From.Scale = 0
+ ctxt.Rexflag |= Pw
+ ctxt.Andptr[0] = 0x64
+ ctxt.Andptr = ctxt.Andptr[1:] // FS
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, &p.To)
+
+ // Windows TLS base is always 0x28(GS).
+ case obj.Hwindows:
+ pp.From = p.From
+
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Name = obj.NAME_NONE
+ pp.From.Reg = REG_GS
+ pp.From.Offset = 0x28
+ pp.From.Index = REG_NONE
+ pp.From.Scale = 0
+ ctxt.Rexflag |= Pw
+ ctxt.Andptr[0] = 0x65
+ ctxt.Andptr = ctxt.Andptr[1:] // GS
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, &p.To)
+ }
+ }
+ return
}
}
}
}
+ goto bad
bad:
if p.Mode != 64 {
* exchange registers and reissue the
* instruction with the operands renamed.
*/
- pp = *p
+ pp := *p
- z = int(p.From.Reg)
+ z := int(p.From.Reg)
if p.From.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
if isax(&p.To) || p.To.Type == obj.TYPE_NONE {
// We certainly don't want to exchange
ctxt.Diag("doasm: notfound ft=%d tt=%d %v %d %d", p.Ft, p.Tt, p, oclass(ctxt, p, &p.From), oclass(ctxt, p, &p.To))
return
-
-mfound:
- switch mo[0].code {
- default:
- ctxt.Diag("asmins: unknown mov %d %v", mo[0].code, p)
-
- case 0: /* lit */
- for z = 0; t[z] != E; z++ {
- ctxt.Andptr[0] = t[z]
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- case 1: /* r,m */
- ctxt.Andptr[0] = t[0]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmando(ctxt, p, &p.To, int(t[1]))
-
- case 2: /* m,r */
- ctxt.Andptr[0] = t[0]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- asmando(ctxt, p, &p.From, int(t[1]))
-
- case 3: /* r,m - 2op */
- ctxt.Andptr[0] = t[0]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = t[1]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmando(ctxt, p, &p.To, int(t[2]))
- ctxt.Rexflag |= regrex[p.From.Reg] & (Rxr | 0x40)
-
- case 4: /* m,r - 2op */
- ctxt.Andptr[0] = t[0]
- ctxt.Andptr = ctxt.Andptr[1:]
-
- ctxt.Andptr[0] = t[1]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmando(ctxt, p, &p.From, int(t[2]))
- ctxt.Rexflag |= regrex[p.To.Reg] & (Rxr | 0x40)
-
- case 5: /* load full pointer, trash heap */
- if t[0] != 0 {
- ctxt.Andptr[0] = t[0]
- ctxt.Andptr = ctxt.Andptr[1:]
- }
- switch p.To.Index {
- default:
- goto bad
-
- case REG_DS:
- ctxt.Andptr[0] = 0xc5
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_SS:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0xb2
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_ES:
- ctxt.Andptr[0] = 0xc4
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_FS:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0xb4
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case REG_GS:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = 0xb5
- ctxt.Andptr = ctxt.Andptr[1:]
- }
-
- asmand(ctxt, p, &p.From, &p.To)
-
- case 6: /* double shift */
- if t[0] == Pw {
- if p.Mode != 64 {
- ctxt.Diag("asmins: illegal 64: %v", p)
- }
- ctxt.Rexflag |= Pw
- t = t[1:]
- } else if t[0] == Pe {
- ctxt.Andptr[0] = Pe
- ctxt.Andptr = ctxt.Andptr[1:]
- t = t[1:]
- }
-
- switch p.From.Type {
- default:
- goto bad
-
- case obj.TYPE_CONST:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = t[0]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
- ctxt.Andptr[0] = byte(p.From.Offset)
- ctxt.Andptr = ctxt.Andptr[1:]
-
- case obj.TYPE_REG:
- switch p.From.Reg {
- default:
- goto bad
-
- case REG_CL,
- REG_CX:
- ctxt.Andptr[0] = 0x0f
- ctxt.Andptr = ctxt.Andptr[1:]
- ctxt.Andptr[0] = t[1]
- ctxt.Andptr = ctxt.Andptr[1:]
- asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
- }
- }
-
- // NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
- // where you load the TLS base register into a register and then index off that
- // register to access the actual TLS variables. Systems that allow direct TLS access
- // are handled in prefixof above and should not be listed here.
- case 7: /* mov tls, r */
- switch ctxt.Headtype {
- default:
- log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype))
-
- case obj.Hplan9:
- if ctxt.Plan9privates == nil {
- ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
- }
- pp.From = obj.Addr{}
- pp.From.Type = obj.TYPE_MEM
- pp.From.Name = obj.NAME_EXTERN
- pp.From.Sym = ctxt.Plan9privates
- pp.From.Offset = 0
- pp.From.Index = REG_NONE
- ctxt.Rexflag |= Pw
- ctxt.Andptr[0] = 0x8B
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &pp.From, &p.To)
-
- // TLS base is 0(FS).
- case obj.Hsolaris: // TODO(rsc): Delete Hsolaris from list. Should not use this code. See progedit in obj6.c.
- pp.From = p.From
-
- pp.From.Type = obj.TYPE_MEM
- pp.From.Name = obj.NAME_NONE
- pp.From.Reg = REG_NONE
- pp.From.Offset = 0
- pp.From.Index = REG_NONE
- pp.From.Scale = 0
- ctxt.Rexflag |= Pw
- ctxt.Andptr[0] = 0x64
- ctxt.Andptr = ctxt.Andptr[1:] // FS
- ctxt.Andptr[0] = 0x8B
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &pp.From, &p.To)
-
- // Windows TLS base is always 0x28(GS).
- case obj.Hwindows:
- pp.From = p.From
-
- pp.From.Type = obj.TYPE_MEM
- pp.From.Name = obj.NAME_NONE
- pp.From.Reg = REG_GS
- pp.From.Offset = 0x28
- pp.From.Index = REG_NONE
- pp.From.Scale = 0
- ctxt.Rexflag |= Pw
- ctxt.Andptr[0] = 0x65
- ctxt.Andptr = ctxt.Andptr[1:] // GS
- ctxt.Andptr[0] = 0x8B
- ctxt.Andptr = ctxt.Andptr[1:]
- asmand(ctxt, p, &pp.From, &p.To)
- }
- }
}
var naclret = []uint8{
}
func asmins(ctxt *obj.Link, p *obj.Prog) {
- var i int
- var n int
- var np int
- var c int
- var and0 []byte
- var r *obj.Reloc
-
ctxt.Andptr = ctxt.And[:]
ctxt.Asmode = int(p.Mode)
if p.As == obj.AUSEFIELD {
- r = obj.Addrel(ctxt.Cursym)
+ r := obj.Addrel(ctxt.Cursym)
r.Off = 0
r.Siz = 0
r.Sym = p.From.Sym
}
ctxt.Rexflag = 0
- and0 = ctxt.Andptr
+ and0 := ctxt.Andptr
ctxt.Asmode = int(p.Mode)
doasm(ctxt, p)
if ctxt.Rexflag != 0 {
if p.Mode != 64 {
ctxt.Diag("asmins: illegal in mode %d: %v", p.Mode, p)
}
- n = -cap(ctxt.Andptr) + cap(and0)
+ n := -cap(ctxt.Andptr) + cap(and0)
+ var c int
+ var np int
for np = 0; np < n; np++ {
c = int(and0[np])
if c != 0xf2 && c != 0xf3 && (c < 0x64 || c > 0x67) && c != 0x2e && c != 0x3e && c != 0x26 {
ctxt.Andptr = ctxt.Andptr[1:]
}
- n = -cap(ctxt.Andptr) + cap(ctxt.And[:])
- for i = len(ctxt.Cursym.R) - 1; i >= 0; i-- {
+ n := -cap(ctxt.Andptr) + cap(ctxt.And[:])
+ var r *obj.Reloc
+ for i := len(ctxt.Cursym.R) - 1; i >= 0; i-- {
r = &ctxt.Cursym.R[i:][0]
if int64(r.Off) < p.Pc {
break
func Pconv(p *obj.Prog) string {
var str string
- var fp string
switch p.As {
case obj.ADATA:
}
}
+ var fp string
fp += str
return fp
}
}
func progedit(ctxt *obj.Link, p *obj.Prog) {
- var literal string
- var s *obj.LSym
- var q *obj.Prog
-
// Thread-local storage references use the TLS pseudo-register.
// As a register, TLS refers to the thread-local storage base, and it
// can only be loaded into another register:
// MOVQ off(BX)(TLS*1), BX
// This allows the C compilers to emit references to m and g using the direct off(TLS) form.
if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
- q = obj.Appendp(ctxt, p)
+ q := obj.Appendp(ctxt, p)
q.As = p.As
q.From = p.From
q.From.Type = obj.TYPE_MEM
ACOMISS,
AUCOMISS:
if p.From.Type == obj.TYPE_FCONST {
- var i32 uint32
- var f32 float32
- f32 = float32(p.From.U.Dval)
- i32 = math.Float32bits(f32)
- literal = fmt.Sprintf("$f32.%08x", i32)
- s = obj.Linklookup(ctxt, literal, 0)
+ f32 := float32(p.From.U.Dval)
+ i32 := math.Float32bits(f32)
+ literal := fmt.Sprintf("$f32.%08x", i32)
+ s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint32(ctxt, s, i32)
ACOMISD,
AUCOMISD:
if p.From.Type == obj.TYPE_FCONST {
- var i64 uint64
- i64 = math.Float64bits(p.From.U.Dval)
- literal = fmt.Sprintf("$f64.%016x", i64)
- s = obj.Linklookup(ctxt, literal, 0)
+ i64 := math.Float64bits(p.From.U.Dval)
+ literal := fmt.Sprintf("$f64.%016x", i64)
+ s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint64(ctxt, s, i64)
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
- var p *obj.Prog
- var q *obj.Prog
- var p1 *obj.Prog
- var p2 *obj.Prog
- var autoffset int32
- var deltasp int32
- var a int
- var pcsize int
- var bpsize int
- var textarg int64
-
if ctxt.Tlsg == nil {
ctxt.Tlsg = obj.Linklookup(ctxt, "runtime.tlsg", 0)
}
return
}
- p = cursym.Text
- autoffset = int32(p.To.Offset)
+ p := cursym.Text
+ autoffset := int32(p.To.Offset)
if autoffset < 0 {
autoffset = 0
}
+ var bpsize int
if obj.Framepointer_enabled != 0 && autoffset > 0 {
	// Make room to save a base pointer. If autoffset == 0,
// this might do something special like a tail jump to
bpsize = 0
}
- textarg = int64(p.To.U.Argsize)
+ textarg := int64(p.To.U.Argsize)
cursym.Args = int32(textarg)
cursym.Locals = int32(p.To.Offset)
if autoffset < obj.StackSmall && p.From3.Offset&obj.NOSPLIT == 0 {
- for q = p; q != nil; q = q.Link {
+ for q := p; q != nil; q = q.Link {
if q.As == obj.ACALL {
goto noleaf
}
noleaf:
}
- q = nil
+ q := (*obj.Prog)(nil)
if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
p = obj.Appendp(ctxt, p)
p = load_g_cx(ctxt, p) // load g into CX
if q != nil {
q.Pcond = p
}
- deltasp = autoffset
+ deltasp := autoffset
if bpsize > 0 {
// Save caller's BP
p = obj.Appendp(ctxt, p)
p.As = AJEQ
p.To.Type = obj.TYPE_BRANCH
- p1 = p
+ p1 := p
p = obj.Appendp(ctxt, p)
p.As = ALEAQ
p = obj.Appendp(ctxt, p)
p.As = AJNE
p.To.Type = obj.TYPE_BRANCH
- p2 = p
+ p2 := p
p = obj.Appendp(ctxt, p)
p.As = AMOVQ
p.As = ASTOSQ
}
+ var a int
+ var pcsize int
for ; p != nil; p = p.Link {
pcsize = int(p.Mode) / 8
a = int(p.From.Name)
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
- var next *obj.Prog
-
p.As = AMOVQ
if ctxt.Arch.Ptrsize == 4 {
p.As = AMOVL
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_CX
- next = p.Link
+ next := p.Link
progedit(ctxt, p)
for p.Link != next {
p = p.Link
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
- var q *obj.Prog
- var q1 *obj.Prog
- var cmp int
- var lea int
- var mov int
- var sub int
-
- cmp = ACMPQ
- lea = ALEAQ
- mov = AMOVQ
- sub = ASUBQ
+ cmp := ACMPQ
+ lea := ALEAQ
+ mov := AMOVQ
+ sub := ASUBQ
if ctxt.Headtype == obj.Hnacl {
cmp = ACMPL
sub = ASUBL
}
- q1 = nil
+ q1 := (*obj.Prog)(nil)
if framesize <= obj.StackSmall {
// small stack: SP <= stackguard
// CMPQ SP, stackguard
p.As = AJHI
p.To.Type = obj.TYPE_BRANCH
- q = p
+ q := p
p = obj.Appendp(ctxt, p)
p.As = obj.ACALL
}
func follow(ctxt *obj.Link, s *obj.LSym) {
- var firstp *obj.Prog
- var lastp *obj.Prog
-
ctxt.Cursym = s
- firstp = ctxt.NewProg()
- lastp = firstp
+ firstp := ctxt.NewProg()
+ lastp := firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link