defer hidePanic()
- archInit(&Thearch)
+ archInit(&thearch)
- Ctxt = obj.Linknew(Thearch.LinkArch)
+ Ctxt = obj.Linknew(thearch.LinkArch)
Ctxt.DebugInfo = debuginfo
Ctxt.DiagFunc = yyerror
Ctxt.Bso = bufio.NewWriter(os.Stdout)
flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier")
var flag_shared bool
var flag_dynlink bool
- if supportsDynlink(Thearch.LinkArch.Arch) {
+ if supportsDynlink(thearch.LinkArch.Arch) {
flag.BoolVar(&flag_shared, "shared", false, "generate code that can be linked into a shared library")
flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
}
- if Thearch.LinkArch.Family == sys.AMD64 {
+ if thearch.LinkArch.Family == sys.AMD64 {
flag.BoolVar(&flag_largemodel, "largemodel", false, "generate code that assumes a large memory model")
}
flag.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`")
Debug['l'] = 1 - Debug['l']
}
- Widthint = Thearch.LinkArch.IntSize
- Widthptr = Thearch.LinkArch.PtrSize
- Widthreg = Thearch.LinkArch.RegSize
+ Widthint = thearch.LinkArch.IntSize
+ Widthptr = thearch.LinkArch.PtrSize
+ Widthreg = thearch.LinkArch.RegSize
initUniverse()
var ssaCache *ssa.Cache
func initssaconfig() {
- ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
- if Thearch.LinkArch.Name == "386" {
- ssaConfig.Set387(Thearch.Use387)
+ ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
+ if thearch.LinkArch.Name == "386" {
+ ssaConfig.Set387(thearch.Use387)
}
ssaCache = new(ssa.Cache)
}
if ft.IsFloat() || tt.IsFloat() {
conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
- if s.config.IntSize == 4 && Thearch.LinkArch.Name != "amd64p32" && Thearch.LinkArch.Family != sys.MIPS {
+ if s.config.IntSize == 4 && thearch.LinkArch.Name != "amd64p32" && thearch.LinkArch.Family != sys.MIPS {
if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
- if Thearch.LinkArch.Name == "arm64" {
+ if thearch.LinkArch.Name == "arm64" {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
- if Thearch.LinkArch.Family == sys.MIPS {
+ if thearch.LinkArch.Family == sys.MIPS {
if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
// tt is float32 or float64, and ft is also unsigned
if tt.Size() == 4 {
return nil
}
fn := sym.Name
- return intrinsics[intrinsicKey{Thearch.LinkArch.Arch, pkg, fn}]
+ return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
}
func isIntrinsicCall(n *Node) bool {
off += size
}
off = Rnd(off, int64(Widthptr))
- if Thearch.LinkArch.Name == "amd64p32" {
+ if thearch.LinkArch.Name == "amd64p32" {
// amd64p32 wants 8-byte alignment of the start of the return values.
off = Rnd(off, 8)
}
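// Illustrative sketch only (hypothetical helper, not part of the gc sources
// above): Rnd rounds an offset up to the next multiple of an alignment.
// Assuming the alignment is a power of two, the usual formulation is:
func roundUp(off, align int64) int64 {
	// e.g. roundUp(12, 8) == 16 and roundUp(16, 8) == 16, so on amd64p32
	// the start of the return values lands on an 8-byte boundary.
	return (off + align - 1) &^ (align - 1)
}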
blockProgs[pc] = f.Blocks[0]
}
- if Thearch.Use387 {
+ if thearch.Use387 {
s.SSEto387 = map[int16]int16{}
}
for i, b := range f.Blocks {
s.bstart[b.ID] = pc
// Emit values in block
- Thearch.SSAMarkMoves(&s, b)
+ thearch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
x := pc
s.SetPos(v.Pos)
default:
// let the backend handle it
- Thearch.SSAGenValue(&s, v)
+ thearch.SSAGenValue(&s, v)
}
if logProgs {
}
x := pc
s.SetPos(b.Pos)
- Thearch.SSAGenBlock(&s, b, next)
+ thearch.SSAGenBlock(&s, b, next)
if logProgs {
for ; x != pc; x = x.Link {
blockProgs[x] = b
liveness(Curfn, ptxt, gcargs, gclocals)
// Add frame prologue. Zero ambiguously live variables.
- Thearch.Defframe(ptxt)
+ thearch.Defframe(ptxt)
if Debug['f'] != 0 {
frame(0)
}
a.Type = obj.TYPE_MEM
a.Node = n
a.Sym = Linksym(n.Sym)
- a.Reg = int16(Thearch.REGSP)
+ a.Reg = int16(thearch.REGSP)
a.Offset = n.Xoffset + off
if n.Class == PPARAM || n.Class == PPARAMOUT {
a.Name = obj.NAME_PARAM
a.Name = obj.NAME_AUTO
a.Node = s.ScratchFpMem
a.Sym = Linksym(s.ScratchFpMem.Sym)
- a.Reg = int16(Thearch.REGSP)
+ a.Reg = int16(thearch.REGSP)
a.Offset = s.ScratchFpMem.Xoffset
}
// insert an actual hardware NOP that will have the right line number.
// This is different from obj.ANOP, which is a virtual no-op
// that doesn't make it into the instruction stream.
- Thearch.Ginsnop()
+ thearch.Ginsnop()
}
p := Prog(obj.ACALL)
p.To.Sym = sym
} else {
// TODO(mdempsky): Can these differences be eliminated?
- switch Thearch.LinkArch.Family {
+ switch thearch.LinkArch.Family {
case sys.AMD64, sys.I386, sys.PPC64, sys.S390X:
p.To.Type = obj.TYPE_REG
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: Types[TUINT32], Off: 0}
}
// Return the two parts of the larger variable.
- if Thearch.LinkArch.ByteOrder == binary.BigEndian {
+ if thearch.LinkArch.ByteOrder == binary.BigEndian {
return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off + 4}
}
return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off}
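// Illustrative sketch only (hypothetical helper, not part of the gc sources
// above): the 64-bit slot is split into 32-bit halves, and byte order decides
// which half sits at the lower offset. Big-endian targets put the high word
// first, little-endian targets put the low word first, which is why the Off
// values in the two returns above are swapped.
func halfOffsets(bigEndian bool) (hiOff, loOff int64) {
	if bigEndian {
		return 0, 4 // high 32 bits at +0, low 32 bits at +4
	}
	return 4, 0 // low 32 bits at +0, high 32 bits at +4
}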