var knownFormats = map[string]string{
"*bytes.Buffer %s": "",
"*cmd/compile/internal/gc.EscLocation %v": "",
+ "*cmd/compile/internal/ir.Func %+v": "",
+ "*cmd/compile/internal/ir.Func %L": "",
+ "*cmd/compile/internal/ir.Func %v": "",
+ "*cmd/compile/internal/ir.Name %#v": "",
+ "*cmd/compile/internal/ir.Name %+v": "",
+ "*cmd/compile/internal/ir.Name %L": "",
"*cmd/compile/internal/ir.Name %v": "",
"*cmd/compile/internal/ir.node %v": "",
"*cmd/compile/internal/ssa.Block %s": "",
"*math/big.Float %f": "",
"*math/big.Int %s": "",
"[16]byte %x": "",
+ "[]*cmd/compile/internal/ir.Name %v": "",
"[]*cmd/compile/internal/ssa.Block %v": "",
"[]*cmd/compile/internal/ssa.Value %v": "",
"[][]string %q": "",
"cmd/compile/internal/ir.Class %d": "",
"cmd/compile/internal/ir.Class %v": "",
"cmd/compile/internal/ir.FmtMode %d": "",
- "cmd/compile/internal/ir.Node %#v": "",
"cmd/compile/internal/ir.Node %+S": "",
"cmd/compile/internal/ir.Node %+v": "",
"cmd/compile/internal/ir.Node %L": "",
funcbody()
- fn.Func().SetDupok(true)
- fn = typecheck(fn, ctxStmt)
+ fn.SetDupok(true)
+ typecheckFunc(fn)
Curfn = fn
typecheckslice(fn.Body().Slice(), ctxStmt)
testdclstack()
}
- fn.Func().SetNilCheckDisabled(true)
+ fn.SetNilCheckDisabled(true)
xtop = append(xtop, fn)
// Build closure. It doesn't close over any variables, so
funcbody()
- fn.Func().SetDupok(true)
- fn = typecheck(fn, ctxStmt)
+ fn.SetDupok(true)
+ typecheckFunc(fn)
Curfn = fn
typecheckslice(fn.Body().Slice(), ctxStmt)
// We are comparing a struct or an array,
// neither of which can be nil, and our comparisons
// are shallow.
- fn.Func().SetNilCheckDisabled(true)
+ fn.SetNilCheckDisabled(true)
xtop = append(xtop, fn)
// Generate a closure which points at the function we just generated.
// markObject visits a reachable object.
func (p *exporter) markObject(n ir.Node) {
if n.Op() == ir.ONAME && n.Class() == ir.PFUNC {
- inlFlood(n)
+ inlFlood(n.(*ir.Name))
}
p.markType(n.Type())
xtype := p.typeExpr(expr.Type)
ntype := p.typeExpr(expr.Type)
- dcl := p.nod(expr, ir.ODCLFUNC, nil, nil)
- fn := dcl.Func()
+ fn := ir.NewFunc(p.pos(expr))
fn.SetIsHiddenClosure(Curfn != nil)
- fn.Nname = newfuncnamel(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure
- fn.Nname.Name().Ntype = xtype
- fn.Nname.Name().Defn = dcl
+ fn.Nname = newFuncNameAt(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure
+ fn.Nname.Ntype = xtype
+ fn.Nname.Defn = fn
clo := p.nod(expr, ir.OCLOSURE, nil, nil)
clo.SetFunc(fn)
fn.ClosureType = ntype
fn.OClosure = clo
- p.funcBody(dcl, expr.Body)
+ p.funcBody(fn, expr.Body)
// closure-specific variables are hanging off the
// ordinary ones in the symbol table; see oldname.
// unhook them.
// make the list of pointers for the closure call.
- for _, v := range fn.ClosureVars.Slice() {
+ for _, v := range fn.ClosureVars {
// Unlink from v1; see comment in syntax.go type Param for these fields.
- v1 := v.Name().Defn
- v1.Name().Innermost = v.Name().Outer
+ v1 := v.Defn
+ v1.Name().Innermost = v.Outer
// If the closure usage of v is not dense,
// we need to make it dense; now that we're out
// obtains f3's v, creating it if necessary (as it is in the example).
//
// capturevars will decide whether to use v directly or &v.
- v.Name().Outer = oldname(v.Sym()).(*ir.Name)
+ v.Outer = oldname(v.Sym()).(*ir.Name)
}
return clo
// separate pass from type-checking.
func typecheckclosure(clo ir.Node, top int) {
fn := clo.Func()
- dcl := fn.Decl
// Set current associated iota value, so iota can be used inside
// function in ConstSpec, see issue #22344
if x := getIotaValue(); x >= 0 {
- dcl.SetIota(x)
+ fn.SetIota(x)
}
fn.ClosureType = typecheck(fn.ClosureType, ctxType)
clo.SetType(fn.ClosureType.Type())
- fn.ClosureCalled = top&ctxCallee != 0
+ fn.SetClosureCalled(top&ctxCallee != 0)
- // Do not typecheck dcl twice, otherwise, we will end up pushing
- // dcl to xtop multiple times, causing initLSym called twice.
+ // Do not typecheck fn twice, otherwise, we will end up pushing
+ // fn to xtop multiple times, causing initLSym called twice.
// See #30709
- if dcl.Typecheck() == 1 {
+ if fn.Typecheck() == 1 {
return
}
- for _, ln := range fn.ClosureVars.Slice() {
- n := ln.Name().Defn
+ for _, ln := range fn.ClosureVars {
+ n := ln.Defn
if !n.Name().Captured() {
n.Name().SetCaptured(true)
if n.Name().Decldepth == 0 {
fn.Nname.SetSym(closurename(Curfn))
setNodeNameFunc(fn.Nname)
- dcl = typecheck(dcl, ctxStmt)
+ typecheckFunc(fn)
// Type check the body now, but only if we're inside a function.
// At top level (in a variable initialization: curfn==nil) we're not
// underlying closure function we create is added to xtop.
if Curfn != nil && clo.Type() != nil {
oldfn := Curfn
- Curfn = dcl
+ Curfn = fn
olddd := decldepth
decldepth = 1
- typecheckslice(dcl.Body().Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
decldepth = olddd
Curfn = oldfn
}
- xtop = append(xtop, dcl)
+ xtop = append(xtop, fn)
}
// globClosgen is like Func.Closgen, but for the global scope:
// it numbers closures that appear outside any enclosing function
// (see closurename, which uses it when outerfunc is nil).
var globClosgen int32
// closurename generates a new unique name for a closure within
// outerfunc.
-func closurename(outerfunc ir.Node) *types.Sym {
+func closurename(outerfunc *ir.Func) *types.Sym {
outer := "glob."
prefix := "func"
gen := &globClosgen
if outerfunc != nil {
- if outerfunc.Func().OClosure != nil {
+ if outerfunc.OClosure != nil {
prefix = ""
}
// There may be multiple functions named "_". In those
// cases, we can't use their individual Closgens as it
// would lead to name clashes.
- if !ir.IsBlank(outerfunc.Func().Nname) {
- gen = &outerfunc.Func().Closgen
+ if !ir.IsBlank(outerfunc.Nname) {
+ gen = &outerfunc.Closgen
}
}
// by value or by reference.
// We use value capturing for values <= 128 bytes that are never reassigned
// after capturing (effectively constant).
-func capturevars(dcl ir.Node) {
+func capturevars(fn *ir.Func) {
lno := base.Pos
- base.Pos = dcl.Pos()
- fn := dcl.Func()
- cvars := fn.ClosureVars.Slice()
+ base.Pos = fn.Pos()
+ cvars := fn.ClosureVars
out := cvars[:0]
for _, v := range cvars {
if v.Type() == nil {
dowidth(v.Type())
var outer ir.Node
- outer = v.Name().Outer
- outermost := v.Name().Defn
+ outer = v.Outer
+ outermost := v.Defn
// out parameters will be assigned to implicitly upon return.
if outermost.Class() != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 {
- v.Name().SetByval(true)
+ v.SetByval(true)
} else {
outermost.Name().SetAddrtaken(true)
outer = ir.Nod(ir.OADDR, outer, nil)
if base.Flag.LowerM > 1 {
var name *types.Sym
- if v.Name().Curfn != nil && v.Name().Curfn.Func().Nname != nil {
- name = v.Name().Curfn.Func().Nname.Sym()
+ if v.Curfn != nil && v.Curfn.Nname != nil {
+ name = v.Curfn.Sym()
}
how := "ref"
- if v.Name().Byval() {
+ if v.Byval() {
how = "value"
}
base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width))
fn.ClosureEnter.Append(outer)
}
- fn.ClosureVars.Set(out)
+ fn.ClosureVars = out
base.Pos = lno
}
// transformclosure is called in a separate phase after escape analysis.
// It transform closure bodies to properly reference captured variables.
-func transformclosure(dcl ir.Node) {
+func transformclosure(fn *ir.Func) {
lno := base.Pos
- base.Pos = dcl.Pos()
- fn := dcl.Func()
+ base.Pos = fn.Pos()
- if fn.ClosureCalled {
+ if fn.ClosureCalled() {
// If the closure is directly called, we transform it to a plain function call
// with variables passed as args. This avoids allocation of a closure object.
// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
// We are going to insert captured variables before input args.
var params []*types.Field
- var decls []ir.Node
- for _, v := range fn.ClosureVars.Slice() {
- if !v.Name().Byval() {
+ var decls []*ir.Name
+ for _, v := range fn.ClosureVars {
+ if !v.Byval() {
// If v of type T is captured by reference,
// we introduce function param &v *T
// and v remains PAUTOHEAP with &v heapaddr
// (accesses will implicitly deref &v).
addr := NewName(lookup("&" + v.Sym().Name))
addr.SetType(types.NewPtr(v.Type()))
- v.Name().Heapaddr = addr
+ v.Heapaddr = addr
v = addr
}
}
dowidth(f.Type())
- dcl.SetType(f.Type()) // update type of ODCLFUNC
+ fn.SetType(f.Type()) // update type of ODCLFUNC
} else {
// The closure is not called, so it is going to stay as closure.
var body []ir.Node
offset := int64(Widthptr)
- for _, v := range fn.ClosureVars.Slice() {
+ for _, v := range fn.ClosureVars {
// cv refers to the field inside of closure OSTRUCTLIT.
cv := ir.Nod(ir.OCLOSUREVAR, nil, nil)
cv.SetType(v.Type())
- if !v.Name().Byval() {
+ if !v.Byval() {
cv.SetType(types.NewPtr(v.Type()))
}
offset = Rnd(offset, int64(cv.Type().Align))
cv.SetOffset(offset)
offset += cv.Type().Width
- if v.Name().Byval() && v.Type().Width <= int64(2*Widthptr) {
+ if v.Byval() && v.Type().Width <= int64(2*Widthptr) {
// If it is a small variable captured by value, downgrade it to PAUTO.
v.SetClass(ir.PAUTO)
fn.Dcl = append(fn.Dcl, v)
addr := NewName(lookup("&" + v.Sym().Name))
addr.SetType(types.NewPtr(v.Type()))
addr.SetClass(ir.PAUTO)
- addr.Name().SetUsed(true)
- addr.Name().Curfn = dcl
+ addr.SetUsed(true)
+ addr.Curfn = fn
fn.Dcl = append(fn.Dcl, addr)
- v.Name().Heapaddr = addr
- if v.Name().Byval() {
+ v.Heapaddr = addr
+ if v.Byval() {
cv = ir.Nod(ir.OADDR, cv, nil)
}
body = append(body, ir.Nod(ir.OAS, addr, cv))
// hasemptycvars reports whether closure clo has an
// empty list of captured vars.
func hasemptycvars(clo ir.Node) bool {
- return clo.Func().ClosureVars.Len() == 0
+ return len(clo.Func().ClosureVars) == 0
}
// closuredebugruntimecheck applies boilerplate checks for debug flags
fields := []ir.Node{
namedfield(".F", types.Types[types.TUINTPTR]),
}
- for _, v := range clo.Func().ClosureVars.Slice() {
+ for _, v := range clo.Func().ClosureVars {
typ := v.Type()
- if !v.Name().Byval() {
+ if !v.Byval() {
typ = types.NewPtr(typ)
}
fields = append(fields, symfield(v.Sym(), typ))
}
// Create top-level function.
- dcl := makepartialcall(dot, dot.Type(), sym)
- dcl.Func().SetWrapper(true)
+ fn := makepartialcall(dot, dot.Type(), sym)
+ fn.SetWrapper(true)
+
dot.SetOp(ir.OCALLPART)
dot.SetRight(NewName(sym))
- dot.SetType(dcl.Type())
- dot.SetFunc(dcl.Func())
+ dot.SetType(fn.Type())
+ dot.SetFunc(fn)
dot.SetOpt(nil) // clear types.Field from ODOTMETH
}
// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
-func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) ir.Node {
+func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) *ir.Func {
rcvrtype := dot.Left().Type()
sym := methodSymSuffix(rcvrtype, meth, "-fm")
if sym.Uniq() {
- return ir.AsNode(sym.Def)
+ return ir.AsNode(sym.Def).(*ir.Func)
}
sym.SetUniq(true)
tfn.PtrList().Set(structargs(t0.Params(), true))
tfn.PtrRlist().Set(structargs(t0.Results(), false))
- dcl := dclfunc(sym, tfn)
- fn := dcl.Func()
+ fn := dclfunc(sym, tfn)
fn.SetDupok(true)
fn.SetNeedctxt(true)
ptr := NewName(lookup(".this"))
declare(ptr, ir.PAUTO)
- ptr.Name().SetUsed(true)
+ ptr.SetUsed(true)
var body []ir.Node
if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
ptr.SetType(rcvrtype)
}
body = append(body, call)
- dcl.PtrBody().Set(body)
+ fn.PtrBody().Set(body)
funcbody()
- dcl = typecheck(dcl, ctxStmt)
+ typecheckFunc(fn)
// Need to typecheck the body of the just-generated wrapper.
// typecheckslice() requires that Curfn is set when processing an ORETURN.
- Curfn = dcl
- typecheckslice(dcl.Body().Slice(), ctxStmt)
- sym.Def = dcl
- xtop = append(xtop, dcl)
+ Curfn = fn
+ typecheckslice(fn.Body().Slice(), ctxStmt)
+ sym.Def = fn
+ xtop = append(xtop, fn)
Curfn = savecurfn
base.Pos = saveLineNo
- return dcl
+ return fn
}
// partialCallType returns the struct type used to hold all the information
// declare records that Node n declares symbol n.Sym in the specified
// declaration context.
-func declare(n ir.Node, ctxt ir.Class) {
+func declare(n *ir.Name, ctxt ir.Class) {
if ir.IsBlank(n) {
return
}
base.Fatalf("automatic outside function")
}
if Curfn != nil && ctxt != ir.PFUNC {
- Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
+ Curfn.Dcl = append(Curfn.Dcl, n)
}
if n.Op() == ir.OTYPE {
declare_typegen++
autoexport(n, ctxt)
}
-func addvar(n ir.Node, t *types.Type, ctxt ir.Class) {
+func addvar(n *ir.Name, t *types.Type, ctxt ir.Class) {
if n == nil || n.Sym() == nil || (n.Op() != ir.ONAME && n.Op() != ir.ONONAME) || t == nil {
base.Fatalf("addvar: n=%v t=%v nil", n, t)
}
as2.PtrList().Set(vl)
as2.PtrRlist().Set1(e)
for _, v := range vl {
+ v := v.(*ir.Name)
v.SetOp(ir.ONAME)
declare(v, dclcontext)
- v.Name().Ntype = t
- v.Name().Defn = as2
+ v.Ntype = t
+ v.Defn = as2
if Curfn != nil {
init = append(init, ir.Nod(ir.ODCL, v, nil))
}
nel := len(el)
for _, v := range vl {
+ v := v.(*ir.Name)
var e ir.Node
if doexpr {
if len(el) == 0 {
v.SetOp(ir.ONAME)
declare(v, dclcontext)
- v.Name().Ntype = t
+ v.Ntype = t
if e != nil || Curfn != nil || ir.IsBlank(v) {
if Curfn != nil {
e = ir.Nod(ir.OAS, v, e)
init = append(init, e)
if e.Right() != nil {
- v.Name().Defn = e
+ v.Defn = e
}
}
}
return n
}
-// newfuncnamel generates a new name node for a function or method.
-func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) ir.Node {
+// newFuncNameAt generates a new name node for a function or method.
+func newFuncNameAt(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Name {
if fn.Nname != nil {
- base.Fatalf("newfuncnamel - already have name")
+ base.Fatalf("newFuncName - already have name")
}
n := ir.NewNameAt(pos, s)
n.SetFunc(fn)
// the := it looks like a reference to the outer x so we'll
// make x a closure variable unnecessarily.
c := n.Name().Innermost
- if c == nil || c.Name().Curfn != Curfn {
+ if c == nil || c.Curfn != Curfn {
// Do not have a closure var for the active closure yet; make one.
c = NewName(s)
c.SetClass(ir.PAUTOHEAP)
- c.Name().SetIsClosureVar(true)
+ c.SetIsClosureVar(true)
c.SetIsDDD(n.IsDDD())
- c.Name().Defn = n
+ c.Defn = n
// Link into list of active closure variables.
// Popped from list in func funcLit.
- c.Name().Outer = n.Name().Innermost
+ c.Outer = n.Name().Innermost
n.Name().Innermost = c
- Curfn.Func().ClosureVars.Append(c)
+ Curfn.ClosureVars = append(Curfn.ClosureVars, c)
}
// return ref to closure var, not original
}
nnew++
- n = NewName(n.Sym())
+ n := NewName(n.Sym())
declare(n, dclcontext)
n.Name().Defn = defn
defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
// funchdr begins processing function fn's body: it saves the current
// typechecking state (Curfn, dclcontext) on funcStack, installs fn as
// Curfn, switches to auto-declaration context, marks a new declaration
// scope, and declares fn's arguments.
// Called in extern-declaration context; returns in auto-declaration
// context. The saved state is restored later (funcbody pops funcStack —
// TODO(review): confirm against the elided funcbody definition).
func funchdr(fn *ir.Func) {
	// change the declaration context from extern to auto
	funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext})
	Curfn = fn
	dclcontext = ir.PAUTO
	types.Markdcl()
	// Declare parameters from the syntactic type expression when one is
	// attached to the function name; otherwise fall back to the computed
	// *types.Type (presumably for imported/generated funcs — verify).
	if fn.Nname != nil && fn.Nname.Ntype != nil {
		funcargs(fn.Nname.Ntype)
	} else {
		funcargs2(fn.Type())
	}
}
return
}
- n.SetRight(ir.NewNameAt(n.Pos(), n.Sym()))
- n.Right().Name().Ntype = n.Left()
- n.Right().SetIsDDD(n.IsDDD())
- declare(n.Right(), ctxt)
+ name := ir.NewNameAt(n.Pos(), n.Sym())
+ n.SetRight(name)
+ name.Ntype = n.Left()
+ name.SetIsDDD(n.IsDDD())
+ declare(name, ctxt)
vargen++
n.Right().Name().Vargen = int32(vargen)
var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext

// funcStackEnt is one saved (Curfn, dclcontext) pair, pushed by funchdr
// when it switches typechecking into a new function.
type funcStackEnt struct {
	curfn      *ir.Func
	dclcontext ir.Class
}
n.Sym().SetFunc(true)
}
// dclfunc creates a new function named sym whose signature is given by
// the OTFUNC node tfn. It allocates the *ir.Func, wires up its Nname
// (Defn and Ntype), marks the symbol as a function, and enters the
// function's declaration scope via funchdr — so the new function is
// left as Curfn for the caller to populate. The signature type
// expression is typechecked before returning.
func dclfunc(sym *types.Sym, tfn ir.Node) *ir.Func {
	if tfn.Op() != ir.OTFUNC {
		base.Fatalf("expected OTFUNC node, got %v", tfn)
	}
	fn := ir.NewFunc(base.Pos)
	fn.Nname = newFuncNameAt(base.Pos, sym, fn)
	fn.Nname.Defn = fn
	fn.Nname.Ntype = tfn
	setNodeNameFunc(fn.Nname)
	funchdr(fn)
	fn.Nname.Ntype = typecheck(fn.Nname.Ntype, ctxType)
	return fn
}
extraCalls map[ir.Node][]nowritebarrierrecCall
// curfn is the current function during AST walks.
- curfn ir.Node
+ curfn *ir.Func
}
// nowritebarrierrecCall records one edge of the call graph walked by
// the nowritebarrierrec checker: which function is on the other end of
// the call, and where the call occurs.
type nowritebarrierrecCall struct {
	target *ir.Func // caller or callee
	lineno src.XPos // line of call
}
if n.Op() != ir.ODCLFUNC {
continue
}
- c.curfn = n
+ c.curfn = n.(*ir.Func)
ir.Inspect(n, c.findExtraCalls)
}
c.curfn = nil
return true
}
- var callee ir.Node
+ var callee *ir.Func
arg := n.List().First()
switch arg.Op() {
case ir.ONAME:
- callee = arg.Name().Defn
+ callee = arg.Name().Defn.(*ir.Func)
case ir.OCLOSURE:
- callee = arg.Func().Decl
+ callee = arg.Func()
default:
base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
}
// because that's all we know after we start SSA.
//
// This can be called concurrently for different from Nodes.
-func (c *nowritebarrierrecChecker) recordCall(from ir.Node, to *obj.LSym, pos src.XPos) {
- if from.Op() != ir.ODCLFUNC {
- base.Fatalf("expected ODCLFUNC, got %v", from)
- }
- // We record this information on the *Func so this is
- // concurrent-safe.
- fn := from.Func()
+func (c *nowritebarrierrecChecker) recordCall(fn *ir.Func, to *obj.LSym, pos src.XPos) {
+ // We record this information on the *Func so this is concurrent-safe.
if fn.NWBRCalls == nil {
fn.NWBRCalls = new([]ir.SymAndPos)
}
// capture all calls created by lowering, but this means we
// only get to see the obj.LSyms of calls. symToFunc lets us
// get back to the ODCLFUNCs.
- symToFunc := make(map[*obj.LSym]ir.Node)
+ symToFunc := make(map[*obj.LSym]*ir.Func)
// funcs records the back-edges of the BFS call graph walk. It
// maps from the ODCLFUNC of each function that must not have
// write barriers to the call that inhibits them. Functions
if n.Op() != ir.ODCLFUNC {
continue
}
+ fn := n.(*ir.Func)
- symToFunc[n.Func().LSym] = n
+ symToFunc[fn.LSym] = fn
// Make nowritebarrierrec functions BFS roots.
- if n.Func().Pragma&ir.Nowritebarrierrec != 0 {
- funcs[n] = nowritebarrierrecCall{}
- q.PushRight(n)
+ if fn.Pragma&ir.Nowritebarrierrec != 0 {
+ funcs[fn] = nowritebarrierrecCall{}
+ q.PushRight(fn)
}
// Check go:nowritebarrier functions.
- if n.Func().Pragma&ir.Nowritebarrier != 0 && n.Func().WBPos.IsKnown() {
- base.ErrorfAt(n.Func().WBPos, "write barrier prohibited")
+ if fn.Pragma&ir.Nowritebarrier != 0 && fn.WBPos.IsKnown() {
+ base.ErrorfAt(fn.WBPos, "write barrier prohibited")
}
}
// Perform a BFS of the call graph from all
// go:nowritebarrierrec functions.
- enqueue := func(src, target ir.Node, pos src.XPos) {
- if target.Func().Pragma&ir.Yeswritebarrierrec != 0 {
+ enqueue := func(src, target *ir.Func, pos src.XPos) {
+ if target.Pragma&ir.Yeswritebarrierrec != 0 {
// Don't flow into this function.
return
}
q.PushRight(target)
}
for !q.Empty() {
- fn := q.PopLeft()
+ fn := q.PopLeft().(*ir.Func)
// Check fn.
- if fn.Func().WBPos.IsKnown() {
+ if fn.WBPos.IsKnown() {
var err bytes.Buffer
call := funcs[fn]
for call.target != nil {
- fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Func().Nname)
+ fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Nname)
call = funcs[call.target]
}
- base.ErrorfAt(fn.Func().WBPos, "write barrier prohibited by caller; %v%s", fn.Func().Nname, err.String())
+ base.ErrorfAt(fn.WBPos, "write barrier prohibited by caller; %v%s", fn.Nname, err.String())
continue
}
for _, callee := range c.extraCalls[fn] {
enqueue(fn, callee.target, callee.lineno)
}
- if fn.Func().NWBRCalls == nil {
+ if fn.NWBRCalls == nil {
continue
}
- for _, callee := range *fn.Func().NWBRCalls {
+ for _, callee := range *fn.NWBRCalls {
target := symToFunc[callee.Sym]
if target != nil {
enqueue(fn, target, callee.Pos)
import (
"cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
base.Ctxt.Diag("failed to locate precursor fn for %v", fn)
return
}
+ _ = ifn.(*ir.Func)
if base.Debug.DwarfInl != 0 {
base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
}
allLocs []*EscLocation
labels map[*types.Sym]labelState // known labels
- curfn ir.Node
+ curfn *ir.Func
// loopDepth counts the current loop nesting depth within
// curfn. It increments within each "for" loop and at each
// variable.
type EscLocation struct {
n ir.Node // represented variable or expression, if any
- curfn ir.Node // enclosing function
+ curfn *ir.Func // enclosing function
edges []EscEdge // incoming edges
loopDepth int // loopDepth at declaration
// escapeFuncs performs escape analysis on a minimal batch of
// functions.
-func escapeFuncs(fns []ir.Node, recursive bool) {
+func escapeFuncs(fns []*ir.Func, recursive bool) {
for _, fn := range fns {
if fn.Op() != ir.ODCLFUNC {
base.Fatalf("unexpected node: %v", fn)
e.finish(fns)
}
-func (e *Escape) initFunc(fn ir.Node) {
- if fn.Op() != ir.ODCLFUNC || fn.Esc() != EscFuncUnknown {
+func (e *Escape) initFunc(fn *ir.Func) {
+ if fn.Esc() != EscFuncUnknown {
base.Fatalf("unexpected node: %v", fn)
}
fn.SetEsc(EscFuncPlanned)
e.loopDepth = 1
// Allocate locations for local variables.
- for _, dcl := range fn.Func().Dcl {
+ for _, dcl := range fn.Dcl {
if dcl.Op() == ir.ONAME {
e.newLoc(dcl, false)
}
}
}
-func (e *Escape) walkFunc(fn ir.Node) {
+func (e *Escape) walkFunc(fn *ir.Func) {
fn.SetEsc(EscFuncStarted)
// Identify labels that mark the head of an unstructured loop.
for i := m.Type.NumResults(); i > 0; i-- {
ks = append(ks, e.heapHole())
}
- paramK := e.tagHole(ks, ir.AsNode(m.Nname), m.Type.Recv())
+ name, _ := m.Nname.(*ir.Name)
+ paramK := e.tagHole(ks, name, m.Type.Recv())
e.expr(e.teeHole(paramK, closureK), n.Left())
k = e.spill(k, n)
// Link addresses of captured variables to closure.
- for _, v := range n.Func().ClosureVars.Slice() {
- if v.Op() == ir.OXXX { // unnamed out argument; see dcl.go:/^funcargs
- continue
- }
-
+ for _, v := range n.Func().ClosureVars {
k := k
- if !v.Name().Byval() {
+ if !v.Byval() {
k = k.addr(v, "reference")
}
- e.expr(k.note(n, "captured by a closure"), v.Name().Defn)
+ e.expr(k.note(n, "captured by a closure"), v.Defn)
}
case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
fixVariadicCall(call)
// Pick out the function callee, if statically known.
- var fn ir.Node
+ var fn *ir.Name
switch call.Op() {
case ir.OCALLFUNC:
switch v := staticValue(call.Left()); {
case v.Op() == ir.ONAME && v.Class() == ir.PFUNC:
- fn = v
+ fn = v.(*ir.Name)
case v.Op() == ir.OCLOSURE:
fn = v.Func().Nname
}
// ks should contain the holes representing where the function
// callee's results flows. fn is the statically-known callee function,
// if any.
-func (e *Escape) tagHole(ks []EscHole, fn ir.Node, param *types.Field) EscHole {
+func (e *Escape) tagHole(ks []EscHole, fn *ir.Name, param *types.Field) EscHole {
// If this is a dynamic call, we can't rely on param.Note.
if fn == nil {
return e.heapHole()
// fn has not yet been analyzed, so its parameters and results
// should be incorporated directly into the flow graph instead of
// relying on its escape analysis tagging.
-func (e *Escape) inMutualBatch(fn ir.Node) bool {
- if fn.Name().Defn != nil && fn.Name().Defn.Esc() < EscFuncTagged {
- if fn.Name().Defn.Esc() == EscFuncUnknown {
+func (e *Escape) inMutualBatch(fn *ir.Name) bool {
+ if fn.Defn != nil && fn.Defn.Esc() < EscFuncTagged {
+ if fn.Defn.Esc() == EscFuncUnknown {
base.Fatalf("graph inconsistency")
}
return true
//
// var u int // okay to stack allocate
// *(func() *int { return &u }()) = 42
- if containsClosure(other.curfn, l.curfn) && l.curfn.Func().ClosureCalled {
+ if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
return false
}
}
// containsClosure reports whether c is a closure contained within f.
-func containsClosure(f, c ir.Node) bool {
- if f.Op() != ir.ODCLFUNC || c.Op() != ir.ODCLFUNC {
- base.Fatalf("bad containsClosure: %v, %v", f, c)
- }
-
+func containsClosure(f, c *ir.Func) bool {
// Common case.
if f == c {
return false
// Closures within function Foo are named like "Foo.funcN..."
// TODO(mdempsky): Better way to recognize this.
- fn := f.Func().Nname.Sym().Name
- cn := c.Func().Nname.Sym().Name
+ fn := f.Sym().Name
+ cn := c.Sym().Name
return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
}
l.paramEsc.AddHeap(derefs)
}
-func (e *Escape) finish(fns []ir.Node) {
+func (e *Escape) finish(fns []*ir.Func) {
// Record parameter tags for package export data.
for _, fn := range fns {
fn.SetEsc(EscFuncTagged)
EscNever // By construction will not escape.
)
-// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
-func funcSym(fn ir.Node) *types.Sym {
- if fn == nil || fn.Func().Nname == nil {
+// funcSym returns fn.Nname.Sym if no nils are encountered along the way.
+func funcSym(fn *ir.Func) *types.Sym {
+ if fn == nil || fn.Nname == nil {
return nil
}
- return fn.Func().Nname.Sym()
+ return fn.Sym()
}
// Mark labels that have no backjumps to them as not increasing e.loopdepth.
// Nothing to do.
case ir.ONAME:
+ n := n.(*ir.Name)
if n == nodfp {
break
}
// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
oldfn := Curfn
Curfn = n.Name().Curfn
- if Curfn.Op() == ir.OCLOSURE {
- Curfn = Curfn.Func().Decl
- panic("can't happen")
- }
ln := base.Pos
base.Pos = Curfn.Pos()
moveToHeap(n)
}
// moveToHeap records the parameter or local variable n as moved to the heap.
-func moveToHeap(n ir.Node) {
+func moveToHeap(n *ir.Name) {
if base.Flag.LowerR != 0 {
ir.Dump("MOVE", n)
}
// Unset AutoTemp to persist the &foo variable name through SSA to
// liveness analysis.
// TODO(mdempsky/drchase): Cleaner solution?
- heapaddr.Name().SetAutoTemp(false)
+ heapaddr.SetAutoTemp(false)
// Parameters have a local stack copy used at function start/end
// in addition to the copy in the heap that may live longer than
stackcopy.SetType(n.Type())
stackcopy.SetOffset(n.Offset())
stackcopy.SetClass(n.Class())
- stackcopy.Name().Heapaddr = heapaddr
+ stackcopy.Heapaddr = heapaddr
if n.Class() == ir.PPARAMOUT {
// Make sure the pointer to the heap copy is kept live throughout the function.
// The function could panic at any point, and then a defer could recover.
// Thus, we need the pointer to the heap copy always available so the
// post-deferreturn code can copy the return value back to the stack.
// See issue 16095.
- heapaddr.Name().SetIsOutputParamHeapAddr(true)
+ heapaddr.SetIsOutputParamHeapAddr(true)
}
n.Name().Stackcopy = stackcopy
// liveness and other analyses use the underlying stack slot
// and not the now-pseudo-variable n.
found := false
- for i, d := range Curfn.Func().Dcl {
+ for i, d := range Curfn.Dcl {
if d == n {
- Curfn.Func().Dcl[i] = stackcopy
+ Curfn.Dcl[i] = stackcopy
found = true
break
}
if !found {
base.Fatalf("cannot find %v in local variable list", n)
}
- Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
+ Curfn.Dcl = append(Curfn.Dcl, n)
}
// Modify n in place so that uses of n now mean indirection of the heapaddr.
if n == nil {
return
}
+ name := n.(*ir.Name)
- n.SetFunc(new(ir.Func))
+ fn := ir.NewFunc(pos)
+ fn.SetType(t)
+ name.SetFunc(fn)
+ fn.Nname = name
if base.Flag.E != 0 {
fmt.Printf("import func %v%S\n", s, t)
}
// make a new Node off the books
-func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) *ir.Name {
+func tempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
if curfn == nil {
base.Fatalf("no curfn for tempAt")
}
}
s := &types.Sym{
- Name: autotmpname(len(curfn.Func().Dcl)),
+ Name: autotmpname(len(curfn.Dcl)),
Pkg: ir.LocalPkg,
}
n := ir.NewNameAt(pos, s)
n.SetType(t)
n.SetClass(ir.PAUTO)
n.SetEsc(EscNever)
- n.Name().Curfn = curfn
- n.Name().SetUsed(true)
- n.Name().SetAutoTemp(true)
- curfn.Func().Dcl = append(curfn.Func().Dcl, n)
+ n.Curfn = curfn
+ n.SetUsed(true)
+ n.SetAutoTemp(true)
+ curfn.Dcl = append(curfn.Dcl, n)
dowidth(t)
var exportlist []ir.Node
-var importlist []ir.Node // imported functions and methods with inlinable bodies
+var importlist []*ir.Func // imported functions and methods with inlinable bodies
var (
funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
var dclcontext ir.Class // PEXTERN/PAUTO
-var Curfn ir.Node
+var Curfn *ir.Func
var Widthptr int
// Whether we are tracking lexical scopes for DWARF.
var trackScopes bool
-var nodfp ir.Node
+var nodfp *ir.Name
var autogeneratedPos src.XPos
next *obj.Prog // next Prog
pc int64 // virtual PC; count of Progs
pos src.XPos // position to use for new Progs
- curfn ir.Node // fn these Progs are for
+ curfn *ir.Func // fn these Progs are for
progcache []obj.Prog // local progcache
cacheidx int // first free element of progcache
// newProgs returns a new Progs for fn.
// worker indicates which of the backend workers will use the Progs.
-func newProgs(fn ir.Node, worker int) *Progs {
+func newProgs(fn *ir.Func, worker int) *Progs {
pp := new(Progs)
if base.Ctxt.CanReuseProgs() {
sz := len(sharedProgArray) / base.Flag.LowerC
return q
}
// settext emits the ATEXT pseudo-instruction that opens fn's body and
// records it both as pp.Text and as the Text Prog of fn's LSym.
// It is a fatal error to call settext twice on the same Progs.
func (pp *Progs) settext(fn *ir.Func) {
	if pp.Text != nil {
		base.Fatalf("Progs.settext called twice")
	}
	ptxt := pp.Prog(obj.ATEXT)
	pp.Text = ptxt
	fn.LSym.Func().Text = ptxt
	// The ATEXT instruction's From operand names the function symbol.
	ptxt.From.Type = obj.TYPE_MEM
	ptxt.From.Name = obj.NAME_EXTERN
	ptxt.From.Sym = fn.LSym
}
// initLSym defines f's obj.LSym and initializes it based on the
// See test/recover.go for test cases and src/reflect/value.go
// for the actual functions being considered.
if base.Ctxt.Pkgpath == "reflect" {
- switch f.Nname.Sym().Name {
+ switch f.Sym().Name {
case "callReflect", "callMethod":
flag |= obj.WRAPPER
}
switch n.Op() {
case ir.ONAME:
+ n := n.(*ir.Name)
switch n.Class() {
case ir.PEXTERN:
// Variable.
w.data.WriteByte(tag)
}
-func (p *iexporter) doInline(f ir.Node) {
+func (p *iexporter) doInline(f *ir.Name) {
w := p.newWriter()
w.setPkg(fnpkg(f), false)
w.symIdx(n.Sym())
}
-func (w *exportWriter) funcExt(n ir.Node) {
+func (w *exportWriter) funcExt(n *ir.Name) {
w.linkname(n.Sym())
w.symIdx(n.Sym())
}
// Endlineno for inlined function.
- if n.Name().Defn != nil {
- w.pos(n.Name().Defn.Func().Endlineno)
- } else {
- // When the exported node was defined externally,
- // e.g. io exports atomic.(*Value).Load or bytes exports errors.New.
- // Keep it as we don't distinguish this case in iimport.go.
- w.pos(n.Func().Endlineno)
- }
+ w.pos(n.Func().Endlineno)
} else {
w.uint64(0)
}
func (w *exportWriter) methExt(m *types.Field) {
w.bool(m.Nointerface())
- w.funcExt(ir.AsNode(m.Nname))
+ w.funcExt(ir.AsNode(m.Nname).(*ir.Name))
}
func (w *exportWriter) linkname(s *types.Sym) {
inlineImporter = map[*types.Sym]iimporterAndOffset{}
)
-func expandDecl(n ir.Node) {
+func expandDecl(n *ir.Name) {
if n.Op() != ir.ONONAME {
return
}
r.doDecl(n)
}
// expandInline ensures fn's inline body is loaded, reading it from
// export data on first use via the registered inlineImporter.
// It is a no-op if the body is already present.
func expandInline(fn *ir.Func) {
	if fn.Inl.Body != nil {
		return
	}
	r := importReaderFor(fn.Nname, inlineImporter)
	if r == nil {
		base.Fatalf("missing import reader for %v", fn)
	}
	r.doInline(fn)
}
-func importReaderFor(n ir.Node, importers map[*types.Sym]iimporterAndOffset) *importReader {
+func importReaderFor(n *ir.Name, importers map[*types.Sym]iimporterAndOffset) *importReader {
x, ok := importers[n.Sym()]
if !ok {
return nil
recv := r.param()
mtyp := r.signature(recv)
- m := newfuncnamel(mpos, methodSym(recv.Type, msym), new(ir.Func))
+ fn := ir.NewFunc(mpos)
+ fn.SetType(mtyp)
+ m := newFuncNameAt(mpos, methodSym(recv.Type, msym), fn)
m.SetType(mtyp)
m.SetClass(ir.PFUNC)
// methodSym already marked m.Sym as a function.
// type.
n := ir.AsNode(r.qualifiedIdent().PkgDef())
if n.Op() == ir.ONONAME {
- expandDecl(n)
+ expandDecl(n.(*ir.Name))
}
if n.Op() != ir.OTYPE {
base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n)
// so we can use index to reference the symbol.
var typeSymIdx = make(map[*types.Type][2]int64)
-func (r *importReader) doInline(n ir.Node) {
- if len(n.Func().Inl.Body) != 0 {
- base.Fatalf("%v already has inline body", n)
+func (r *importReader) doInline(fn *ir.Func) {
+ if len(fn.Inl.Body) != 0 {
+ base.Fatalf("%v already has inline body", fn)
}
- funchdr(n)
+ funchdr(fn)
body := r.stmtList()
funcbody()
if body == nil {
// functions).
body = []ir.Node{}
}
- n.Func().Inl.Body = body
+ fn.Inl.Body = body
- importlist = append(importlist, n)
+ importlist = append(importlist, fn)
if base.Flag.E > 0 && base.Flag.LowerM > 2 {
if base.Flag.LowerM > 3 {
- fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type(), ir.AsNodes(n.Func().Inl.Body))
+ fmt.Printf("inl body for %v %#v: %+v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body))
} else {
- fmt.Printf("inl body for %v %#v: %v\n", n, n.Type(), ir.AsNodes(n.Func().Inl.Body))
+ fmt.Printf("inl body for %v %#v: %v\n", fn, fn.Type(), ir.AsNodes(fn.Inl.Body))
}
}
}
caseVar := ir.NewNameAt(cas.Pos(), r.ident())
declare(caseVar, dclcontext)
cas.PtrRlist().Set1(caseVar)
- caseVar.Name().Defn = sw.Left()
+ caseVar.Defn = sw.Left()
}
cas.PtrBody().Set(r.stmtList())
cases[i] = cas
// Function collecting autotmps generated during typechecking,
// to be included in the package-level init function.
-var initTodo = ir.Nod(ir.ODCLFUNC, nil, nil)
+var initTodo = ir.NewFunc(base.Pos)
func renameinit() *types.Sym {
s := lookupN("init.", renameinitgen)
base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt
initializers := lookup("init")
fn := dclfunc(initializers, ir.Nod(ir.OTFUNC, nil, nil))
- for _, dcl := range initTodo.Func().Dcl {
+ for _, dcl := range initTodo.Dcl {
dcl.Name().Curfn = fn
}
- fn.Func().Dcl = append(fn.Func().Dcl, initTodo.Func().Dcl...)
- initTodo.Func().Dcl = nil
+ fn.Dcl = append(fn.Dcl, initTodo.Dcl...)
+ initTodo.Dcl = nil
fn.PtrBody().Set(nf)
funcbody()
- fn = typecheck(fn, ctxStmt)
+ typecheckFunc(fn)
Curfn = fn
typecheckslice(nf, ctxStmt)
Curfn = nil
xtop = append(xtop, fn)
fns = append(fns, initializers.Linksym())
}
- if initTodo.Func().Dcl != nil {
+ if initTodo.Dcl != nil {
// We only generate temps using initTodo if there
// are package-scope initialization statements, so
// something's weird if we get here.
case ir.ONAME:
switch n.Class() {
case ir.PEXTERN, ir.PFUNC:
- d.foundDep(n)
+ d.foundDep(n.(*ir.Name))
}
case ir.OCLOSURE:
- d.inspectList(n.Func().Decl.Body())
+ d.inspectList(n.Func().Body())
case ir.ODOTMETH, ir.OCALLPART:
d.foundDep(methodExprName(n))
// foundDep records that we've found a dependency on n by adding it to
// seen.
-func (d *initDeps) foundDep(n ir.Node) {
+func (d *initDeps) foundDep(n *ir.Name) {
// Can happen with method expressions involving interface
// types; e.g., fixedbugs/issue4495.go.
if n == nil {
// Names without definitions aren't interesting as far as
// initialization ordering goes.
- if n.Name().Defn == nil {
+ if n.Defn == nil {
return
}
}
d.seen.Add(n)
if d.transitive && n.Class() == ir.PFUNC {
- d.inspectList(n.Name().Defn.Body())
+ d.inspectList(n.Defn.Body())
}
}
// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
// the ->sym can be re-used in the local package, so peel it off the receiver's type.
-func fnpkg(fn ir.Node) *types.Pkg {
+func fnpkg(fn *ir.Name) *types.Pkg {
if ir.IsMethod(fn) {
// method
rcvr := fn.Type().Recv().Type
// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// because they're a copy of an already checked body.
-func typecheckinl(fn ir.Node) {
- lno := setlineno(fn)
+func typecheckinl(fn *ir.Func) {
+ lno := setlineno(fn.Nname)
expandInline(fn)
// their bodies may refer to unsafe as long as the package
// was marked safe during import (which was checked then).
// the ->inl of a local function has been typechecked before caninl copied it.
- pkg := fnpkg(fn)
+ pkg := fnpkg(fn.Nname)
if pkg == ir.LocalPkg || pkg == nil {
return // typecheckinl on local function
}
if base.Flag.LowerM > 2 || base.Debug.Export != 0 {
- fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym(), fn, ir.AsNodes(fn.Func().Inl.Body))
+ fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym(), fn, ir.AsNodes(fn.Inl.Body))
}
savefn := Curfn
Curfn = fn
- typecheckslice(fn.Func().Inl.Body, ctxStmt)
+ typecheckslice(fn.Inl.Body, ctxStmt)
Curfn = savefn
// During expandInline (which imports fn.Func.Inl.Body),
// to fn.Func.Inl.Dcl for consistency with how local functions
// behave. (Append because typecheckinl may be called multiple
// times.)
- fn.Func().Inl.Dcl = append(fn.Func().Inl.Dcl, fn.Func().Dcl...)
- fn.Func().Dcl = nil
+ fn.Inl.Dcl = append(fn.Inl.Dcl, fn.Dcl...)
+ fn.Dcl = nil
base.Pos = lno
}
// Caninl determines whether fn is inlineable.
// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
// fn and ->nbody will already have been typechecked.
-func caninl(fn ir.Node) {
- if fn.Op() != ir.ODCLFUNC {
- base.Fatalf("caninl %v", fn)
- }
- if fn.Func().Nname == nil {
+func caninl(fn *ir.Func) {
+ if fn.Nname == nil {
base.Fatalf("caninl no nname %+v", fn)
}
defer func() {
if reason != "" {
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Func().Nname, reason)
+ fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Nname, reason)
}
if logopt.Enabled() {
logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason)
}
// If marked "go:noinline", don't inline
- if fn.Func().Pragma&ir.Noinline != 0 {
+ if fn.Pragma&ir.Noinline != 0 {
reason = "marked go:noinline"
return
}
// If marked "go:norace" and -race compilation, don't inline.
- if base.Flag.Race && fn.Func().Pragma&ir.Norace != 0 {
+ if base.Flag.Race && fn.Pragma&ir.Norace != 0 {
reason = "marked go:norace with -race compilation"
return
}
// If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
- if base.Debug.Checkptr != 0 && fn.Func().Pragma&ir.NoCheckPtr != 0 {
+ if base.Debug.Checkptr != 0 && fn.Pragma&ir.NoCheckPtr != 0 {
reason = "marked go:nocheckptr"
return
}
// If marked "go:cgo_unsafe_args", don't inline, since the
// function makes assumptions about its argument frame layout.
- if fn.Func().Pragma&ir.CgoUnsafeArgs != 0 {
+ if fn.Pragma&ir.CgoUnsafeArgs != 0 {
reason = "marked go:cgo_unsafe_args"
return
}
// If marked as "go:uintptrescapes", don't inline, since the
// escape information is lost during inlining.
- if fn.Func().Pragma&ir.UintptrEscapes != 0 {
+ if fn.Pragma&ir.UintptrEscapes != 0 {
reason = "marked as having an escaping uintptr argument"
return
}
// granularity, so inlining yeswritebarrierrec functions can
// confuse it (#22342). As a workaround, disallow inlining
// them for now.
- if fn.Func().Pragma&ir.Yeswritebarrierrec != 0 {
+ if fn.Pragma&ir.Yeswritebarrierrec != 0 {
reason = "marked go:yeswritebarrierrec"
return
}
base.Fatalf("caninl on non-typechecked function %v", fn)
}
- n := fn.Func().Nname
+ n := fn.Nname
if n.Func().InlinabilityChecked() {
return
}
n.Func().Inl = &ir.Inline{
Cost: inlineMaxBudget - visitor.budget,
- Dcl: inlcopylist(pruneUnusedAutos(n.Name().Defn.Func().Dcl, &visitor)),
+ Dcl: pruneUnusedAutos(n.Defn.Func().Dcl, &visitor),
Body: inlcopylist(fn.Body().Slice()),
}
// inlFlood marks n's inline body for export and recursively ensures
// all called functions are marked too.
-func inlFlood(n ir.Node) {
+func inlFlood(n *ir.Name) {
if n == nil {
return
}
if n.Op() != ir.ONAME || n.Class() != ir.PFUNC {
base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class())
}
- if n.Func() == nil {
+ fn := n.Func()
+ if fn == nil {
base.Fatalf("inlFlood: missing Func on %v", n)
}
- if n.Func().Inl == nil {
+ if fn.Inl == nil {
return
}
- if n.Func().ExportInline() {
+ if fn.ExportInline() {
return
}
- n.Func().SetExportInline(true)
+ fn.SetExportInline(true)
- typecheckinl(n)
+ typecheckinl(fn)
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
- ir.InspectList(ir.AsNodes(n.Func().Inl.Body), func(n ir.Node) bool {
+ ir.InspectList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) bool {
switch n.Op() {
- case ir.OMETHEXPR:
+ case ir.OMETHEXPR, ir.ODOTMETH:
inlFlood(methodExprName(n))
case ir.ONAME:
+ n := n.(*ir.Name)
switch n.Class() {
case ir.PFUNC:
inlFlood(n)
exportsym(n)
}
- case ir.ODOTMETH:
- fn := methodExprName(n)
- inlFlood(fn)
-
case ir.OCALLPART:
// Okay, because we don't yet inline indirect
// calls to method values.
break
}
- if fn := inlCallee(n.Left()); fn != nil && fn.Func().Inl != nil {
- v.budget -= fn.Func().Inl.Cost
+ if fn := inlCallee(n.Left()); fn != nil && fn.Inl != nil {
+ v.budget -= fn.Inl.Cost
break
}
// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point.
-func inlcalls(fn ir.Node) {
+func inlcalls(fn *ir.Func) {
savefn := Curfn
Curfn = fn
maxCost := int32(inlineMaxBudget)
// but allow inlining if there is a recursion cycle of many functions.
// Most likely, the inlining will stop before we even hit the beginning of
// the cycle again, but the map catches the unusual case.
- inlMap := make(map[ir.Node]bool)
- fn = inlnode(fn, maxCost, inlMap)
+ inlMap := make(map[*ir.Func]bool)
+ fn = inlnode(fn, maxCost, inlMap).(*ir.Func)
if fn != Curfn {
base.Fatalf("inlnode replaced curfn")
}
return s
}
-func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[ir.Node]bool) {
+func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[*ir.Func]bool) {
s := l.Slice()
for i := range s {
s[i] = inlnode(s[i], maxCost, inlMap)
// shorter and less complicated.
// The result of inlnode MUST be assigned back to n, e.g.
// n.Left = inlnode(n.Left)
-func inlnode(n ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node {
+func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool) ir.Node {
if n == nil {
return n
}
if isIntrinsicCall(n) {
break
}
- if fn := inlCallee(n.Left()); fn != nil && fn.Func().Inl != nil {
+ if fn := inlCallee(n.Left()); fn != nil && fn.Inl != nil {
n = mkinlcall(n, fn, maxCost, inlMap)
}
base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left())
}
- n = mkinlcall(n, methodExprName(n.Left()), maxCost, inlMap)
+ n = mkinlcall(n, methodExprName(n.Left()).Func(), maxCost, inlMap)
}
base.Pos = lno
// inlCallee takes a function-typed expression and returns the underlying function ONAME
// that it refers to if statically known. Otherwise, it returns nil.
-func inlCallee(fn ir.Node) ir.Node {
+func inlCallee(fn ir.Node) *ir.Func {
fn = staticValue(fn)
switch {
case fn.Op() == ir.OMETHEXPR:
if n == nil || !types.Identical(n.Type().Recv().Type, fn.Left().Type()) {
return nil
}
- return n
+ return n.Func()
case fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC:
- return fn
+ return fn.Func()
case fn.Op() == ir.OCLOSURE:
- c := fn.Func().Decl
+ c := fn.Func()
caninl(c)
- return c.Func().Nname
+ return c
}
return nil
}
base.Fatalf("RHS is nil: %v", defn)
}
- unsafe, _ := reassigned(n)
+ unsafe, _ := reassigned(n.(*ir.Name))
if unsafe {
return nil
}
// useful for -m output documenting the reason for inhibited optimizations.
// NB: global variables are always considered to be re-assigned.
// TODO: handle initial declaration not including an assignment and followed by a single assignment?
-func reassigned(n ir.Node) (bool, ir.Node) {
+func reassigned(n *ir.Name) (bool, ir.Node) {
if n.Op() != ir.ONAME {
base.Fatalf("reassigned %v", n)
}
// no way to reliably check for no-reassignment of globals, assume it can be
- if n.Name().Curfn == nil {
+ if n.Curfn == nil {
return true, nil
}
- f := n.Name().Curfn
- // There just might be a good reason for this although this can be pretty surprising:
- // local variables inside a closure have Curfn pointing to the OCLOSURE node instead
- // of the corresponding ODCLFUNC.
- // We need to walk the function body to check for reassignments so we follow the
- // linkage to the ODCLFUNC node as that is where body is held.
- if f.Op() == ir.OCLOSURE {
- f = f.Func().Decl
- }
+ f := n.Curfn
v := reassignVisitor{name: n}
a := v.visitList(f.Body())
return a != nil, a
return nil
}
-func inlParam(t *types.Field, as ir.Node, inlvars map[ir.Node]ir.Node) ir.Node {
+func inlParam(t *types.Field, as ir.Node, inlvars map[*ir.Name]ir.Node) ir.Node {
n := ir.AsNode(t.Nname)
if n == nil || ir.IsBlank(n) {
return ir.BlankNode
}
- inlvar := inlvars[n]
+ inlvar := inlvars[n.(*ir.Name)]
if inlvar == nil {
base.Fatalf("missing inlvar for %v", n)
}
// parameters.
// The result of mkinlcall MUST be assigned back to n, e.g.
// n.Left = mkinlcall(n.Left, fn, isddd)
-func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node {
- if fn.Func().Inl == nil {
+func mkinlcall(n ir.Node, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool) ir.Node {
+ if fn.Inl == nil {
if logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn)))
}
return n
}
- if fn.Func().Inl.Cost > maxCost {
+ if fn.Inl.Cost > maxCost {
// The inlined function body is too big. Typically we use this check to restrict
// inlining into very big functions. See issue 26546 and 17566.
if logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
- fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func().Inl.Cost, ir.PkgFuncName(fn), maxCost))
+ fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Inl.Cost, ir.PkgFuncName(fn), maxCost))
}
return n
}
- if fn == Curfn || fn.Name().Defn == Curfn {
+ if fn == Curfn {
// Can't recursively inline a function into itself.
if logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(Curfn)))
// We have a function node, and it has an inlineable body.
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: inlining call to %v %#v { %#v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.AsNodes(fn.Func().Inl.Body))
+ fmt.Printf("%v: inlining call to %v %#v { %#v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.AsNodes(fn.Inl.Body))
} else if base.Flag.LowerM != 0 {
fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
}
}
// Make temp names to use instead of the originals.
- inlvars := make(map[ir.Node]ir.Node)
+ inlvars := make(map[*ir.Name]ir.Node)
// record formals/locals for later post-processing
var inlfvars []ir.Node
// Handle captured variables when inlining closures.
- if fn.Name().Defn != nil {
- if c := fn.Name().Defn.Func().OClosure; c != nil {
- for _, v := range c.Func().ClosureVars.Slice() {
- if v.Op() == ir.OXXX {
- continue
- }
+ if c := fn.OClosure; c != nil {
+ for _, v := range c.Func().ClosureVars {
+ if v.Op() == ir.OXXX {
+ continue
+ }
- o := v.Name().Outer
- // make sure the outer param matches the inlining location
- // NB: if we enabled inlining of functions containing OCLOSURE or refined
- // the reassigned check via some sort of copy propagation this would most
- // likely need to be changed to a loop to walk up to the correct Param
- if o == nil || (o.Name().Curfn != Curfn && o.Name().Curfn.Func().OClosure != Curfn) {
- base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v)
- }
+ o := v.Outer
+ // make sure the outer param matches the inlining location
+ // NB: if we enabled inlining of functions containing OCLOSURE or refined
+ // the reassigned check via some sort of copy propagation this would most
+ // likely need to be changed to a loop to walk up to the correct Param
+ if o == nil || (o.Curfn != Curfn && o.Curfn.OClosure != Curfn) {
+ base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v)
+ }
- if v.Name().Byval() {
- iv := typecheck(inlvar(v), ctxExpr)
- ninit.Append(ir.Nod(ir.ODCL, iv, nil))
- ninit.Append(typecheck(ir.Nod(ir.OAS, iv, o), ctxStmt))
- inlvars[v] = iv
- } else {
- addr := NewName(lookup("&" + v.Sym().Name))
- addr.SetType(types.NewPtr(v.Type()))
- ia := typecheck(inlvar(addr), ctxExpr)
- ninit.Append(ir.Nod(ir.ODCL, ia, nil))
- ninit.Append(typecheck(ir.Nod(ir.OAS, ia, ir.Nod(ir.OADDR, o, nil)), ctxStmt))
- inlvars[addr] = ia
-
- // When capturing by reference, all occurrence of the captured var
- // must be substituted with dereference of the temporary address
- inlvars[v] = typecheck(ir.Nod(ir.ODEREF, ia, nil), ctxExpr)
- }
+ if v.Byval() {
+ iv := typecheck(inlvar(v), ctxExpr)
+ ninit.Append(ir.Nod(ir.ODCL, iv, nil))
+ ninit.Append(typecheck(ir.Nod(ir.OAS, iv, o), ctxStmt))
+ inlvars[v] = iv
+ } else {
+ addr := NewName(lookup("&" + v.Sym().Name))
+ addr.SetType(types.NewPtr(v.Type()))
+ ia := typecheck(inlvar(addr), ctxExpr)
+ ninit.Append(ir.Nod(ir.ODCL, ia, nil))
+ ninit.Append(typecheck(ir.Nod(ir.OAS, ia, ir.Nod(ir.OADDR, o, nil)), ctxStmt))
+ inlvars[addr] = ia
+
+			// When capturing by reference, all occurrences of the captured var
+			// must be substituted with a dereference of the temporary address.
+ inlvars[v] = typecheck(ir.Nod(ir.ODEREF, ia, nil), ctxExpr)
}
}
}
- for _, ln := range fn.Func().Inl.Dcl {
+ for _, ln := range fn.Inl.Dcl {
if ln.Op() != ir.ONAME {
continue
}
}
nreturns := 0
- ir.InspectList(ir.AsNodes(fn.Func().Inl.Body), func(n ir.Node) bool {
+ ir.InspectList(ir.AsNodes(fn.Inl.Body), func(n ir.Node) bool {
if n != nil && n.Op() == ir.ORETURN {
nreturns++
}
for i, t := range fn.Type().Results().Fields().Slice() {
var m ir.Node
if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym().Name, "~r") {
+ n := n.(*ir.Name)
m = inlvar(n)
m = typecheck(m, ctxExpr)
inlvars[n] = m
if b := base.Ctxt.PosTable.Pos(n.Pos()).Base(); b != nil {
parent = b.InliningIndex()
}
- newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), fn.Sym().Linksym())
+
+ sym := fn.Sym().Linksym()
+ newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
// Add an inline mark just before the inlined body.
// This mark is inline in the code so that it's a reasonable spot
ninit.Append(inlMark)
if base.Flag.GenDwarfInl > 0 {
- if !fn.Sym().Linksym().WasInlined() {
- base.Ctxt.DwFixups.SetPrecursorFunc(fn.Sym().Linksym(), fn)
- fn.Sym().Linksym().Set(obj.AttrWasInlined, true)
+ if !sym.WasInlined() {
+ base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
+ sym.Set(obj.AttrWasInlined, true)
}
}
newInlIndex: newIndex,
}
- body := subst.list(ir.AsNodes(fn.Func().Inl.Body))
+ body := subst.list(ir.AsNodes(fn.Inl.Body))
lab := nodSym(ir.OLABEL, nil, retlabel)
body = append(body, lab)
n := NewName(var_.Sym())
n.SetType(var_.Type())
n.SetClass(ir.PAUTO)
- n.Name().SetUsed(true)
- n.Name().Curfn = Curfn // the calling function, not the called one
- n.Name().SetAddrtaken(var_.Name().Addrtaken())
+ n.SetUsed(true)
+ n.Curfn = Curfn // the calling function, not the called one
+ n.SetAddrtaken(var_.Name().Addrtaken())
- Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
+ Curfn.Dcl = append(Curfn.Dcl, n)
return n
}
n := NewName(lookupN("~R", i))
n.SetType(t.Type)
n.SetClass(ir.PAUTO)
- n.Name().SetUsed(true)
- n.Name().Curfn = Curfn // the calling function, not the called one
- Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
+ n.SetUsed(true)
+ n.Curfn = Curfn // the calling function, not the called one
+ Curfn.Dcl = append(Curfn.Dcl, n)
return n
}
n := NewName(lookupN("~arg", i))
n.SetType(t.Elem())
n.SetClass(ir.PAUTO)
- n.Name().SetUsed(true)
- n.Name().Curfn = Curfn // the calling function, not the called one
- Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
+ n.SetUsed(true)
+ n.Curfn = Curfn // the calling function, not the called one
+ Curfn.Dcl = append(Curfn.Dcl, n)
return n
}
// "return" statement.
delayretvars bool
- inlvars map[ir.Node]ir.Node
+ inlvars map[*ir.Name]ir.Node
// bases maps from original PosBase to PosBase with an extra
// inlined call frame.
switch n.Op() {
case ir.ONAME:
+ n := n.(*ir.Name)
if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
if base.Flag.LowerM > 2 {
fmt.Printf("substituting name %+v -> %+v\n", n, inlvar)
return base.Ctxt.PosTable.XPos(pos)
}
-func pruneUnusedAutos(ll []ir.Node, vis *hairyVisitor) []ir.Node {
- s := make([]ir.Node, 0, len(ll))
+func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
+ s := make([]*ir.Name, 0, len(ll))
for _, n := range ll {
if n.Class() == ir.PAUTO {
if _, found := vis.usedLocals[n]; !found {
// devirtualize replaces interface method calls within fn with direct
// concrete-type method calls where applicable.
-func devirtualize(fn ir.Node) {
+func devirtualize(fn *ir.Func) {
Curfn = fn
ir.InspectList(fn.Body(), func(n ir.Node) bool {
if n.Op() == ir.OCALLINTER {
for i := 0; i < len(xtop); i++ {
n := xtop[i]
if n.Op() == ir.ODCLFUNC {
- Curfn = n
+ Curfn = n.(*ir.Func)
decldepth = 1
errorsBefore := base.Errors()
typecheckslice(Curfn.Body().Slice(), ctxStmt)
timings.Start("fe", "capturevars")
for _, n := range xtop {
if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil {
- Curfn = n
- capturevars(n)
+ Curfn = n.(*ir.Func)
+ capturevars(Curfn)
}
}
capturevarscomplete = true
// Typecheck imported function bodies if Debug.l > 1,
// otherwise lazily when used or re-exported.
for _, n := range importlist {
- if n.Func().Inl != nil {
+ if n.Inl != nil {
typecheckinl(n)
}
}
if base.Flag.LowerL != 0 {
// Find functions that can be inlined and clone them before walk expands them.
- visitBottomUp(xtop, func(list []ir.Node, recursive bool) {
+ visitBottomUp(xtop, func(list []*ir.Func, recursive bool) {
numfns := numNonClosures(list)
for _, n := range list {
if !recursive || numfns > 1 {
caninl(n)
} else {
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Func().Nname)
+ fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname)
}
}
inlcalls(n)
for _, n := range xtop {
if n.Op() == ir.ODCLFUNC {
- devirtualize(n)
+ devirtualize(n.(*ir.Func))
}
}
Curfn = nil
timings.Start("fe", "xclosures")
for _, n := range xtop {
if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil {
- Curfn = n
- transformclosure(n)
+ Curfn = n.(*ir.Func)
+ transformclosure(Curfn)
}
}
for i := 0; i < len(xtop); i++ {
n := xtop[i]
if n.Op() == ir.ODCLFUNC {
- funccompile(n)
+ funccompile(n.(*ir.Func))
fcount++
}
}
}
// numNonClosures returns the number of functions in list which are not closures.
-func numNonClosures(list []ir.Node) int {
+func numNonClosures(list []*ir.Func) int {
count := 0
- for _, n := range list {
- if n.Func().OClosure == nil {
+ for _, fn := range list {
+ if fn.OClosure == nil {
count++
}
}
lastCloseScopePos syntax.Pos
}
-func (p *noder) funcBody(fn ir.Node, block *syntax.BlockStmt) {
+func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) {
oldScope := p.scope
p.scope = 0
funchdr(fn)
fn.PtrBody().Set(body)
base.Pos = p.makeXPos(block.Rbrace)
- fn.Func().Endlineno = base.Pos
+ fn.Endlineno = base.Pos
}
funcbody()
types.Markdcl()
if trackScopes {
- Curfn.Func().Parents = append(Curfn.Func().Parents, p.scope)
- p.scopeVars = append(p.scopeVars, len(Curfn.Func().Dcl))
- p.scope = ir.ScopeID(len(Curfn.Func().Parents))
+ Curfn.Parents = append(Curfn.Parents, p.scope)
+ p.scopeVars = append(p.scopeVars, len(Curfn.Dcl))
+ p.scope = ir.ScopeID(len(Curfn.Parents))
p.markScope(pos)
}
if trackScopes {
scopeVars := p.scopeVars[len(p.scopeVars)-1]
p.scopeVars = p.scopeVars[:len(p.scopeVars)-1]
- if scopeVars == len(Curfn.Func().Dcl) {
+ if scopeVars == len(Curfn.Dcl) {
// no variables were declared in this scope, so we can retract it.
- if int(p.scope) != len(Curfn.Func().Parents) {
+ if int(p.scope) != len(Curfn.Parents) {
base.Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted")
}
- p.scope = Curfn.Func().Parents[p.scope-1]
- Curfn.Func().Parents = Curfn.Func().Parents[:len(Curfn.Func().Parents)-1]
+ p.scope = Curfn.Parents[p.scope-1]
+ Curfn.Parents = Curfn.Parents[:len(Curfn.Parents)-1]
- nmarks := len(Curfn.Func().Marks)
- Curfn.Func().Marks[nmarks-1].Scope = p.scope
+ nmarks := len(Curfn.Marks)
+ Curfn.Marks[nmarks-1].Scope = p.scope
prevScope := ir.ScopeID(0)
if nmarks >= 2 {
- prevScope = Curfn.Func().Marks[nmarks-2].Scope
+ prevScope = Curfn.Marks[nmarks-2].Scope
}
- if Curfn.Func().Marks[nmarks-1].Scope == prevScope {
- Curfn.Func().Marks = Curfn.Func().Marks[:nmarks-1]
+ if Curfn.Marks[nmarks-1].Scope == prevScope {
+ Curfn.Marks = Curfn.Marks[:nmarks-1]
}
return
}
- p.scope = Curfn.Func().Parents[p.scope-1]
+ p.scope = Curfn.Parents[p.scope-1]
p.markScope(pos)
}
func (p *noder) markScope(pos syntax.Pos) {
xpos := p.makeXPos(pos)
- if i := len(Curfn.Func().Marks); i > 0 && Curfn.Func().Marks[i-1].Pos == xpos {
- Curfn.Func().Marks[i-1].Scope = p.scope
+ if i := len(Curfn.Marks); i > 0 && Curfn.Marks[i-1].Pos == xpos {
+ Curfn.Marks[i-1].Scope = p.scope
} else {
- Curfn.Func().Marks = append(Curfn.Func().Marks, ir.Mark{Pos: xpos, Scope: p.scope})
+ Curfn.Marks = append(Curfn.Marks, ir.Mark{Pos: xpos, Scope: p.scope})
}
}
nn := make([]ir.Node, 0, len(names))
for i, n := range names {
+ n := n.(*ir.Name)
if i >= len(values) {
base.Errorf("missing value in const declaration")
break
n.SetOp(ir.OLITERAL)
declare(n, dclcontext)
- n.Name().Ntype = typ
- n.Name().Defn = v
+ n.Ntype = typ
+ n.Defn = v
n.SetIota(cs.iota)
nn = append(nn, p.nod(decl, ir.ODCLCONST, n, nil))
func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
name := p.name(fun.Name)
t := p.signature(fun.Recv, fun.Type)
- f := p.nod(fun, ir.ODCLFUNC, nil, nil)
+ f := ir.NewFunc(p.pos(fun))
if fun.Recv == nil {
if name.Name == "init" {
}
}
} else {
- f.Func().Shortname = name
+ f.Shortname = name
name = ir.BlankNode.Sym() // filled in by typecheckfunc
}
- f.Func().Nname = newfuncnamel(p.pos(fun.Name), name, f.Func())
- f.Func().Nname.Name().Defn = f
- f.Func().Nname.Name().Ntype = t
+ f.Nname = newFuncNameAt(p.pos(fun.Name), name, f)
+ f.Nname.Defn = f
+ f.Nname.Ntype = t
if pragma, ok := fun.Pragma.(*Pragma); ok {
- f.Func().Pragma = pragma.Flag & FuncPragmas
+ f.Pragma = pragma.Flag & FuncPragmas
if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 {
base.ErrorfAt(f.Pos(), "go:nosplit and go:systemstack cannot be combined")
}
}
if fun.Recv == nil {
- declare(f.Func().Nname, ir.PFUNC)
+ declare(f.Nname, ir.PFUNC)
}
p.funcBody(f, fun.Body)
if fun.Body != nil {
- if f.Func().Pragma&ir.Noescape != 0 {
+ if f.Pragma&ir.Noescape != 0 {
base.ErrorfAt(f.Pos(), "can only use //go:noescape with external func implementations")
}
} else {
n := p.nod(stmt, ir.ORETURN, nil, nil)
n.PtrList().Set(results)
if n.List().Len() == 0 && Curfn != nil {
- for _, ln := range Curfn.Func().Dcl {
+ for _, ln := range Curfn.Dcl {
if ln.Class() == ir.PPARAM {
continue
}
newOrErr = true
n := NewName(sym)
declare(n, dclcontext)
- n.Name().Defn = defn
+ n.Defn = defn
defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
res[i] = n
}
declare(nn, dclcontext)
n.PtrRlist().Set1(nn)
// keep track of the instances for reporting unused
- nn.Name().Defn = tswitch
+ nn.Defn = tswitch
}
// Trim trailing empty statements. We omit them from
for i := xtops; i < len(xtop); i++ {
n := xtop[i]
if n.Op() == ir.ODCLFUNC {
- funccompile(n)
+ funccompile(n.(*ir.Func))
}
}
xtops = len(xtop)
// Order rewrites fn.Nbody to apply the ordering constraints
// described in the comment at the top of the file.
-func order(fn ir.Node) {
+func order(fn *ir.Func) {
if base.Flag.W > 1 {
- s := fmt.Sprintf("\nbefore order %v", fn.Func().Nname.Sym())
+ s := fmt.Sprintf("\nbefore order %v", fn.Sym())
ir.DumpList(s, fn.Body())
}
}
case ir.OCLOSURE:
- if n.Transient() && n.Func().ClosureVars.Len() > 0 {
+ if n.Transient() && len(n.Func().ClosureVars) > 0 {
prealloc[n] = o.newTemp(closureType(n), false)
}
// "Portable" code generation.
var (
- compilequeue []ir.Node // functions waiting to be compiled
+ compilequeue []*ir.Func // functions waiting to be compiled
)
-func emitptrargsmap(fn ir.Node) {
- if ir.FuncName(fn) == "_" || fn.Func().Nname.Sym().Linkname != "" {
+func emitptrargsmap(fn *ir.Func) {
+ if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" {
return
}
- lsym := base.Ctxt.Lookup(fn.Func().LSym.Name + ".args_stackmap")
+ lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
nptr := int(fn.Type().ArgWidth() / int64(Widthptr))
bv := bvalloc(int32(nptr) * 2)
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
-func cmpstackvarlt(a, b ir.Node) bool {
+func cmpstackvarlt(a, b *ir.Name) bool {
if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) {
return b.Class() == ir.PAUTO
}
}
// byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
-type byStackVar []ir.Node
+type byStackVar []*ir.Name
func (s byStackVar) Len() int { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s *ssafn) AllocFrame(f *ssa.Func) {
s.stksize = 0
s.stkptrsize = 0
- fn := s.curfn.Func()
+ fn := s.curfn
// Mark the PAUTO's unused.
for _, ln := range fn.Dcl {
s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
-func funccompile(fn ir.Node) {
+func funccompile(fn *ir.Func) {
if Curfn != nil {
- base.Fatalf("funccompile %v inside %v", fn.Func().Nname.Sym(), Curfn.Func().Nname.Sym())
+ base.Fatalf("funccompile %v inside %v", fn.Sym(), Curfn.Sym())
}
if fn.Type() == nil {
if fn.Body().Len() == 0 {
// Initialize ABI wrappers if necessary.
- initLSym(fn.Func(), false)
+ initLSym(fn, false)
emitptrargsmap(fn)
return
}
dclcontext = ir.PAUTO
Curfn = fn
-
compile(fn)
-
Curfn = nil
dclcontext = ir.PEXTERN
}
-func compile(fn ir.Node) {
+func compile(fn *ir.Func) {
errorsBefore := base.Errors()
order(fn)
if base.Errors() > errorsBefore {
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in markTypeUsedInInterface).
- initLSym(fn.Func(), true)
+ initLSym(fn, true)
walk(fn)
if base.Errors() > errorsBefore {
// be types of stack objects. We need to do this here
// because symbols must be allocated before the parallel
// phase of the compiler.
- for _, n := range fn.Func().Dcl {
+ for _, n := range fn.Dcl {
switch n.Class() {
case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
if livenessShouldTrack(n) && n.Name().Addrtaken() {
dtypesym(n.Type())
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
- if fn.Func().LSym.Func().StackObjects == nil {
- fn.Func().LSym.Func().StackObjects = base.Ctxt.Lookup(fn.Func().LSym.Name + ".stkobj")
+ if fn.LSym.Func().StackObjects == nil {
+ fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj")
}
}
}
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
-func compilenow(fn ir.Node) bool {
+func compilenow(fn *ir.Func) bool {
// Issue 38068: if this function is a method AND an inline
// candidate AND was not inlined (yet), put it onto the compile
// queue instead of compiling it immediately. This is in case we
// isInlinableButNotInlined returns true if 'fn' was marked as an
// inline candidate but then never inlined (presumably because we
// found no call sites).
-func isInlinableButNotInlined(fn ir.Node) bool {
- if fn.Func().Nname.Func().Inl == nil {
+func isInlinableButNotInlined(fn *ir.Func) bool {
+ if fn.Inl == nil {
return false
}
if fn.Sym() == nil {
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
-func compileSSA(fn ir.Node, worker int) {
+func compileSSA(fn *ir.Func, worker int) {
f := buildssa(fn, worker)
// Note: check arg size to fix issue 25507.
if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
pp.Flush() // assemble, fill in boilerplate, etc.
// fieldtrack must be called after pp.Flush. See issue 20014.
- fieldtrack(pp.Text.From.Sym, fn.Func().FieldTrack)
+ fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
}
func init() {
sizeCalculationDisabled = true // not safe to calculate sizes concurrently
if race.Enabled {
// Randomize compilation order to try to shake out races.
- tmp := make([]ir.Node, len(compilequeue))
+ tmp := make([]*ir.Func, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
}
var wg sync.WaitGroup
base.Ctxt.InParallel = true
- c := make(chan ir.Node, base.Flag.LowerC)
+ c := make(chan *ir.Func, base.Flag.LowerC)
for i := 0; i < base.Flag.LowerC; i++ {
wg.Add(1)
go func(worker int) {
}
func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
- fn := curfn.(ir.Node)
- if fn.Func().Nname != nil {
- if expect := fn.Func().Nname.Sym().Linksym(); fnsym != expect {
+ fn := curfn.(*ir.Func)
+
+ if fn.Nname != nil {
+ if expect := fn.Sym().Linksym(); fnsym != expect {
base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
}
}
//
// These two adjustments keep toolstash -cmp working for now.
// Deciding the right answer is, as they say, future work.
- isODCLFUNC := fn.Op() == ir.ODCLFUNC
+ //
+ // We can tell the difference between the old ODCLFUNC and ONAME
+ // cases by looking at the infosym.Name. If it's empty, DebugInfo is
+ // being called from (*obj.Link).populateDWARF, which used to use
+ // the ODCLFUNC. If it's non-empty (the name will end in $abstract),
+ // DebugInfo is being called from (*obj.Link).DwarfAbstractFunc,
+ // which used to use the ONAME form.
+ isODCLFUNC := infosym.Name == ""
var apdecls []ir.Node
// Populate decls for fn.
if isODCLFUNC {
- for _, n := range fn.Func().Dcl {
+ for _, n := range fn.Dcl {
if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
continue
}
}
}
- decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn.Func(), apdecls)
+ decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls)
// For each type referenced by the functions auto vars but not
// already referenced by a dwarf var, attach an R_USETYPE relocation to
var varScopes []ir.ScopeID
for _, decl := range decls {
pos := declPos(decl)
- varScopes = append(varScopes, findScope(fn.Func().Marks, pos))
+ varScopes = append(varScopes, findScope(fn.Marks, pos))
}
scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
func preInliningDcls(fnsym *obj.LSym) []ir.Node {
- fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(ir.Node)
+ fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func)
var rdcl []ir.Node
- for _, n := range fn.Func().Inl.Dcl {
+ for _, n := range fn.Inl.Dcl {
c := n.Sym().Name[0]
// Avoid reporting "_" parameters, since if there are more than
// one, it can result in a collision later on, as in #23179.
return t
}
-func markUsed(n ir.Node) ir.Node {
- n.Name().SetUsed(true)
+func markUsed(n *ir.Name) *ir.Name {
+ n.SetUsed(true)
return n
}
-func markNeedZero(n ir.Node) ir.Node {
- n.Name().SetNeedzero(true)
+func markNeedZero(n *ir.Name) *ir.Name {
+ n.SetNeedzero(true)
return n
}
// Test all code paths for cmpstackvarlt.
func TestCmpstackvar(t *testing.T) {
- nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node {
+ nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name {
if s == nil {
s = &types.Sym{Name: "."}
}
return n
}
testdata := []struct {
- a, b ir.Node
+ a, b *ir.Name
lt bool
}{
{
}
func TestStackvarSort(t *testing.T) {
- nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node {
+ nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name {
n := NewName(s)
n.SetType(t)
n.SetOffset(xoffset)
n.SetClass(cl)
return n
}
- inp := []ir.Node{
+ inp := []*ir.Name{
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
}
- want := []ir.Node{
+ want := []*ir.Name{
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC),
// A collection of global state used by liveness analysis.
type Liveness struct {
- fn ir.Node
+ fn *ir.Func
f *ssa.Func
vars []ir.Node
idx map[ir.Node]int32
// getvariables returns the list of on-stack variables that we need to track
// and a map for looking up indices by *Node.
-func getvariables(fn ir.Node) ([]ir.Node, map[ir.Node]int32) {
+func getvariables(fn *ir.Func) ([]ir.Node, map[ir.Node]int32) {
var vars []ir.Node
- for _, n := range fn.Func().Dcl {
+ for _, n := range fn.Dcl {
if livenessShouldTrack(n) {
vars = append(vars, n)
}
// Constructs a new liveness structure used to hold the global state of the
// liveness computation. The cfg argument is a slice of *BasicBlocks and the
// vars argument is a slice of *Nodes.
-func newliveness(fn ir.Node, f *ssa.Func, vars []ir.Node, idx map[ir.Node]int32, stkptrsize int64) *Liveness {
+func newliveness(fn *ir.Func, f *ssa.Func, vars []ir.Node, idx map[ir.Node]int32, stkptrsize int64) *Liveness {
lv := &Liveness{
fn: fn,
f: f,
// pointers to copy values back to the stack).
// TODO: if the output parameter is heap-allocated, then we
// don't need to keep the stack copy live?
- if lv.fn.Func().HasDefer() {
+ if lv.fn.HasDefer() {
for i, n := range lv.vars {
if n.Class() == ir.PPARAMOUT {
if n.Name().IsOutputParamHeapAddr() {
if n.Class() == ir.PPARAM {
continue // ok
}
- base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Func().Nname, n)
+ base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Nname, n)
}
// Record live variables.
}
// If we have an open-coded deferreturn call, make a liveness map for it.
- if lv.fn.Func().OpenCodedDeferDisallowed() {
+ if lv.fn.OpenCodedDeferDisallowed() {
lv.livenessMap.deferreturn = LivenessDontCare
} else {
lv.livenessMap.deferreturn = LivenessIndex{
// input parameters.
for j, n := range lv.vars {
if n.Class() != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) {
- lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func().Nname, n)
+ lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Nname, n)
}
}
}
return
}
- pos := lv.fn.Func().Nname.Pos()
+ pos := lv.fn.Nname.Pos()
if v != nil {
pos = v.Pos
}
if b == lv.f.Entry {
live := lv.stackMaps[0]
- fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Func().Nname.Pos()))
+ fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Nname.Pos()))
fmt.Printf("\tlive=")
printed = false
for j, n := range lv.vars {
}
// Emit the live pointer map data structures
- ls := e.curfn.Func().LSym
+ ls := e.curfn.LSym
fninfo := ls.Func()
fninfo.GCArgs, fninfo.GCLocals = lv.emit()
return false
}
-func instrument(fn ir.Node) {
- if fn.Func().Pragma&ir.Norace != 0 {
+func instrument(fn *ir.Func) {
+ if fn.Pragma&ir.Norace != 0 {
return
}
if !base.Flag.Race || !ispkgin(norace_inst_pkgs) {
- fn.Func().SetInstrumentBody(true)
+ fn.SetInstrumentBody(true)
}
if base.Flag.Race {
base.Pos = src.NoXPos
if thearch.LinkArch.Arch.Family != sys.AMD64 {
- fn.Func().Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
- fn.Func().Exit.Append(mkcall("racefuncexit", nil, nil))
+ fn.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
+ fn.Exit.Append(mkcall("racefuncexit", nil, nil))
} else {
// nodpc is the PC of the caller as extracted by
// This only works for amd64. This will not
// work on arm or others that might support
// race in the future.
- nodpc := ir.Copy(nodfp)
+ nodpc := ir.Copy(nodfp).(*ir.Name)
nodpc.SetType(types.Types[types.TUINTPTR])
nodpc.SetOffset(int64(-Widthptr))
- fn.Func().Dcl = append(fn.Func().Dcl, nodpc)
- fn.Func().Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
- fn.Func().Exit.Append(mkcall("racefuncexit", nil, nil))
+ fn.Dcl = append(fn.Dcl, nodpc)
+ fn.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
+ fn.Exit.Append(mkcall("racefuncexit", nil, nil))
}
base.Pos = lno
}
var fn ir.Node
if a.Type().Elem().HasPointers() {
// memclrHasPointers(hp, hn)
- Curfn.Func().SetWBPos(stmt.Pos())
+ Curfn.SetWBPos(stmt.Pos())
fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
} else {
// memclrNoHeapPointers(hp, hn)
// when analyzing a set of mutually recursive functions.
type bottomUpVisitor struct {
- analyze func([]ir.Node, bool)
+ analyze func([]*ir.Func, bool)
visitgen uint32
- nodeID map[ir.Node]uint32
- stack []ir.Node
+ nodeID map[*ir.Func]uint32
+ stack []*ir.Func
}
// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
-func visitBottomUp(list []ir.Node, analyze func(list []ir.Node, recursive bool)) {
+func visitBottomUp(list []ir.Node, analyze func(list []*ir.Func, recursive bool)) {
var v bottomUpVisitor
v.analyze = analyze
- v.nodeID = make(map[ir.Node]uint32)
+ v.nodeID = make(map[*ir.Func]uint32)
for _, n := range list {
if n.Op() == ir.ODCLFUNC && !n.Func().IsHiddenClosure() {
- v.visit(n)
+ v.visit(n.(*ir.Func))
}
}
}
-func (v *bottomUpVisitor) visit(n ir.Node) uint32 {
+func (v *bottomUpVisitor) visit(n *ir.Func) uint32 {
if id := v.nodeID[n]; id > 0 {
// already visited
return id
case ir.ONAME:
if n.Class() == ir.PFUNC {
if n != nil && n.Name().Defn != nil {
- if m := v.visit(n.Name().Defn); m < min {
+ if m := v.visit(n.Name().Defn.(*ir.Func)); m < min {
min = m
}
}
}
case ir.OMETHEXPR:
fn := methodExprName(n)
- if fn != nil && fn.Name().Defn != nil {
- if m := v.visit(fn.Name().Defn); m < min {
+ if fn != nil && fn.Defn != nil {
+ if m := v.visit(fn.Defn.(*ir.Func)); m < min {
min = m
}
}
case ir.ODOTMETH:
fn := methodExprName(n)
- if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
- if m := v.visit(fn.Name().Defn); m < min {
+ if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Defn != nil {
+ if m := v.visit(fn.Defn.(*ir.Func)); m < min {
min = m
}
}
case ir.OCALLPART:
fn := ir.AsNode(callpartMethod(n).Nname)
if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
- if m := v.visit(fn.Name().Defn); m < min {
+ if m := v.visit(fn.Name().Defn.(*ir.Func)); m < min {
min = m
}
}
case ir.OCLOSURE:
- if m := v.visit(n.Func().Decl); m < min {
+ if m := v.visit(n.Func()); m < min {
min = m
}
}
return true
})
- if (min == id || min == id+1) && !n.Func().IsHiddenClosure() {
+ if (min == id || min == id+1) && !n.IsHiddenClosure() {
// This node is the root of a strongly connected component.
// The original min passed to visitcodelist was v.nodeID[n]+1.
const maxOpenDefers = 8
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
-var ssaDumpInlined []ir.Node
+var ssaDumpInlined []*ir.Func
func initssaconfig() {
types_ := ssa.NewTypes()
// - Size of the argument
// - Offset of where argument should be placed in the args frame when making call
func (s *state) emitOpenDeferInfo() {
- x := base.Ctxt.Lookup(s.curfn.Func().LSym.Name + ".opendefer")
- s.curfn.Func().LSym.Func().OpenCodedDeferInfo = x
+ x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
+ s.curfn.LSym.Func().OpenCodedDeferInfo = x
off := 0
// Compute maxargsize (max size of arguments for all defers)
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
-func buildssa(fn ir.Node, worker int) *ssa.Func {
+func buildssa(fn *ir.Func, worker int) *ssa.Func {
name := ir.FuncName(fn)
printssa := false
if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
var astBuf *bytes.Buffer
if printssa {
astBuf = &bytes.Buffer{}
- ir.FDumpList(astBuf, "buildssa-enter", fn.Func().Enter)
+ ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
ir.FDumpList(astBuf, "buildssa-body", fn.Body())
- ir.FDumpList(astBuf, "buildssa-exit", fn.Func().Exit)
+ ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
if ssaDumpStdout {
fmt.Println("generating SSA for", name)
fmt.Print(astBuf.String())
s.pushLine(fn.Pos())
defer s.popLine()
- s.hasdefer = fn.Func().HasDefer()
- if fn.Func().Pragma&ir.CgoUnsafeArgs != 0 {
+ s.hasdefer = fn.HasDefer()
+ if fn.Pragma&ir.CgoUnsafeArgs != 0 {
s.cgoUnsafeArgs = true
}
s.f.Name = name
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
s.f.PrintOrHtmlSSA = printssa
- if fn.Func().Pragma&ir.Nosplit != 0 {
+ if fn.Pragma&ir.Nosplit != 0 {
s.f.NoSplit = true
}
s.panics = map[funcLine]*ssa.Block{}
s.fwdVars = map[ir.Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
- s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func().OpenCodedDeferDisallowed()
+ s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
switch {
case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
// that we don't track correctly.
s.hasOpenDefers = false
}
- if s.hasOpenDefers && s.curfn.Func().Exit.Len() > 0 {
+ if s.hasOpenDefers && s.curfn.Exit.Len() > 0 {
// Skip doing open defers if there is any extra exit code (likely
// copying heap-allocated return values or race detection), since
// we will not generate that code in the case of the extra
s.hasOpenDefers = false
}
if s.hasOpenDefers &&
- s.curfn.Func().NumReturns*s.curfn.Func().NumDefers > 15 {
+ s.curfn.NumReturns*s.curfn.NumDefers > 15 {
// Since we are generating defer calls at every exit for
// open-coded defers, skip doing open-coded defers if there are
// too many returns (especially if there are multiple defers).
s.decladdrs = map[ir.Node]*ssa.Value{}
var args []ssa.Param
var results []ssa.Param
- for _, n := range fn.Func().Dcl {
+ for _, n := range fn.Dcl {
switch n.Class() {
case ir.PPARAM:
s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
}
// Populate SSAable arguments.
- for _, n := range fn.Func().Dcl {
+ for _, n := range fn.Dcl {
if n.Class() == ir.PPARAM && s.canSSA(n) {
v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.vars[n] = v
}
// Convert the AST-based IR to the SSA-based IR
- s.stmtList(fn.Func().Enter)
+ s.stmtList(fn.Enter)
s.stmtList(fn.Body())
// fallthrough to exit
if s.curBlock != nil {
- s.pushLine(fn.Func().Endlineno)
+ s.pushLine(fn.Endlineno)
s.exit()
s.popLine()
}
return s.f
}
-func dumpSourcesColumn(writer *ssa.HTMLWriter, fn ir.Node) {
+func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
// Read sources of target function fn.
fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
- targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Func().Endlineno.Line())
+ targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
if err != nil {
writer.Logf("cannot read sources for function %v: %v", fn, err)
}
// Read sources of inlined functions.
var inlFns []*ssa.FuncLines
for _, fi := range ssaDumpInlined {
- var elno src.XPos
- if fi.Name().Defn == nil {
- // Endlineno is filled from exported data.
- elno = fi.Func().Endlineno
- } else {
- elno = fi.Name().Defn.Func().Endlineno
- }
+ elno := fi.Endlineno
fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
if err != nil {
f *ssa.Func
// Node for function
- curfn ir.Node
+ curfn *ir.Func
// labels in f
labels map[string]*ssaLabel
}
func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) {
- if !s.curfn.Func().InstrumentBody() {
+ if !s.curfn.InstrumentBody() {
return
}
// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
// variables back to the stack.
- s.stmtList(s.curfn.Func().Exit)
+ s.stmtList(s.curfn.Exit)
// Store SSAable PPARAMOUT variables back to stack locations.
for _, n := range s.returns {
pos = n.Pos()
}
argTemp := tempAt(pos.WithNotStmt(), s.curfn, t)
- argTemp.Name().SetOpenDeferSlot(true)
+ argTemp.SetOpenDeferSlot(true)
var addrArgTemp *ssa.Value
// Use OpVarLive to make sure stack slots for the args, etc. are not
// removed by dead-store elimination
// Therefore, we must make sure it is zeroed out in the entry
// block if it contains pointers, else GC may wrongly follow an
// uninitialized pointer value.
- argTemp.Name().SetNeedzero(true)
+ argTemp.SetNeedzero(true)
}
if !canSSA {
a := s.addr(n)
// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
// We can then pass that to defer or go.
n2 := ir.NewNameAt(fn.Pos(), fn.Sym())
- n2.Name().Curfn = s.curfn
+ n2.Curfn = s.curfn
n2.SetClass(ir.PFUNC)
// n2.Sym already existed, so it's already marked as a function.
n2.SetPos(fn.Pos())
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
- if base.Debug.DisableNil != 0 || s.curfn.Func().NilCheckDisabled() {
+ if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
return
}
s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
func emitStackObjects(e *ssafn, pp *Progs) {
var vars []ir.Node
- for _, n := range e.curfn.Func().Dcl {
+ for _, n := range e.curfn.Dcl {
if livenessShouldTrack(n) && n.Name().Addrtaken() {
vars = append(vars, n)
}
// Populate the stack object data.
// Format must match runtime/stack.go:stackObjectRecord.
- x := e.curfn.Func().LSym.Func().StackObjects
+ x := e.curfn.LSym.Func().StackObjects
off := 0
off = duintptr(x, off, uint64(len(vars)))
for _, v := range vars {
s.livenessMap = liveness(e, f, pp)
emitStackObjects(e, pp)
- openDeferInfo := e.curfn.Func().LSym.Func().OpenCodedDeferInfo
+ openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
// This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa.
// some of the inline marks.
// Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement
- pp.curfn.Func().LSym.Func().AddInlMark(p, inlMarks[m])
+ pp.curfn.LSym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP
m.Pos = src.NoXPos
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList {
if p.As != obj.ANOP {
- pp.curfn.Func().LSym.Func().AddInlMark(p, inlMarks[p])
+ pp.curfn.LSym.Func().AddInlMark(p, inlMarks[p])
}
}
}
if base.Ctxt.Flag_locationlists {
debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset)
- e.curfn.Func().DebugInfo = debugInfo
+ e.curfn.DebugInfo = debugInfo
bstart := s.bstart
// Note that at this moment, Prog.Pc is a sequence number; it's
// not a real PC until after assembly, so this mapping has to
}
return bstart[b].Pc
case ssa.BlockEnd.ID:
- return e.curfn.Func().LSym.Size
+ return e.curfn.LSym.Size
default:
return valueToProgAfter[v].Pc
}
var state uint32
// Iterate through declarations. They are sorted in decreasing Xoffset order.
- for _, n := range e.curfn.Func().Dcl {
+ for _, n := range e.curfn.Dcl {
if !n.Name().Needzero() {
continue
}
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
- curfn ir.Node
+ curfn *ir.Func
strings map[string]*obj.LSym // map from constant string to data symbols
scratchFpMem ir.Node // temp for floating point register / memory moves on some architectures
stksize int64 // stack size for current frame
n.SetType(t)
n.SetClass(ir.PAUTO)
n.SetEsc(EscNever)
- n.Name().Curfn = e.curfn
- e.curfn.Func().Dcl = append(e.curfn.Func().Dcl, n)
+ n.Curfn = e.curfn
+ e.curfn.Dcl = append(e.curfn.Dcl, n)
dowidth(t)
return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
}
func (e *ssafn) SetWBPos(pos src.XPos) {
- e.curfn.Func().SetWBPos(pos)
+ e.curfn.SetWBPos(pos)
}
func (e *ssafn) MyImportPath() string {
if Curfn == nil {
base.Fatalf("autolabel outside function")
}
- n := fn.Func().Label
- fn.Func().Label++
+ n := fn.Label
+ fn.Label++
return lookupN(prefix, int(n))
}
// newname returns a new ONAME Node associated with symbol s.
func NewName(s *types.Sym) *ir.Name {
n := ir.NewNameAt(base.Pos, s)
- n.Name().Curfn = Curfn
+ n.Curfn = Curfn
return n
}
tfn.PtrRlist().Set(structargs(method.Type.Results(), false))
fn := dclfunc(newnam, tfn)
- fn.Func().SetDupok(true)
+ fn.SetDupok(true)
nthis := ir.AsNode(tfn.Type().Recv().Nname)
fn.PtrBody().Append(as)
fn.PtrBody().Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym)))
} else {
- fn.Func().SetWrapper(true) // ignore frame for panic+recover matching
+ fn.SetWrapper(true) // ignore frame for panic+recover matching
call := ir.Nod(ir.OCALL, dot, nil)
call.PtrList().Set(paramNnames(tfn.Type()))
call.SetIsDDD(tfn.Type().IsVariadic())
testdclstack()
}
- fn = typecheck(fn, ctxStmt)
-
+ typecheckFunc(fn)
Curfn = fn
typecheckslice(fn.Body().Slice(), ctxStmt)
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil {
inlcalls(fn)
}
- escapeFuncs([]ir.Node{fn}, false)
+ escapeFuncs([]*ir.Func{fn}, false)
Curfn = nil
xtop = append(xtop, fn)
base.Fatalf("recursive inimport")
}
inimport = true
- expandDecl(n)
+ expandDecl(n.(*ir.Name))
inimport = false
return n
}
var typecheck_tcstack []ir.Node
+func typecheckFunc(fn *ir.Func) {
+ new := typecheck(fn, ctxStmt)
+ if new != fn {
+ base.Fatalf("typecheck changed func")
+ }
+}
+
// typecheck type checks node n.
// The result of typecheck MUST be assigned back to n, e.g.
// n.Left = typecheck(n.Left, top)
case ir.ODCLFUNC:
ok |= ctxStmt
- typecheckfunc(n)
+ typecheckfunc(n.(*ir.Func))
case ir.ODCLCONST:
ok |= ctxStmt
}
// type check function definition
-func typecheckfunc(n ir.Node) {
+// To be called by typecheck, not directly.
+// (Call typecheckFunc instead.)
+func typecheckfunc(n *ir.Func) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckfunc", n)(nil)
}
- for _, ln := range n.Func().Dcl {
+ for _, ln := range n.Dcl {
if ln.Op() == ir.ONAME && (ln.Class() == ir.PPARAM || ln.Class() == ir.PPARAMOUT) {
ln.Name().Decldepth = 1
}
}
- n.Func().Nname = typecheck(n.Func().Nname, ctxExpr|ctxAssign)
- t := n.Func().Nname.Type()
+ n.Nname = typecheck(n.Nname, ctxExpr|ctxAssign).(*ir.Name)
+ t := n.Nname.Type()
if t == nil {
return
}
n.SetType(t)
rcvr := t.Recv()
- if rcvr != nil && n.Func().Shortname != nil {
- m := addmethod(n, n.Func().Shortname, t, true, n.Func().Pragma&ir.Nointerface != 0)
+ if rcvr != nil && n.Shortname != nil {
+ m := addmethod(n, n.Shortname, t, true, n.Pragma&ir.Nointerface != 0)
if m == nil {
return
}
- n.Func().Nname.SetSym(methodSym(rcvr.Type, n.Func().Shortname))
- declare(n.Func().Nname, ir.PFUNC)
+ n.Nname.SetSym(methodSym(rcvr.Type, n.Shortname))
+ declare(n.Nname, ir.PFUNC)
}
- if base.Ctxt.Flag_dynlink && !inimport && n.Func().Nname != nil {
- makefuncsym(n.Func().Nname.Sym())
+ if base.Ctxt.Flag_dynlink && !inimport && n.Nname != nil {
+ makefuncsym(n.Sym())
}
}
}
// checkreturn makes sure that fn terminates appropriately.
-func checkreturn(fn ir.Node) {
+func checkreturn(fn *ir.Func) {
if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 {
var labels map[*types.Sym]ir.Node
markbreaklist(&labels, fn.Body(), nil)
if !isTermNodes(fn.Body()) {
- base.ErrorfAt(fn.Func().Endlineno, "missing return at end of function")
+ base.ErrorfAt(fn.Endlineno, "missing return at end of function")
}
}
}
-func deadcode(fn ir.Node) {
+func deadcode(fn *ir.Func) {
deadcodeslice(fn.PtrBody())
- deadcodefn(fn)
-}
-func deadcodefn(fn ir.Node) {
if fn.Body().Len() == 0 {
return
}
// Initialization expressions for package-scope variables.
return ir.LocalPkg
}
-
- // TODO(mdempsky): Standardize on either ODCLFUNC or ONAME for
- // Curfn, rather than mixing them.
- if fn.Op() == ir.ODCLFUNC {
- fn = fn.Func().Nname
- }
-
- return fnpkg(fn)
+ return fnpkg(fn.Nname)
}
// MethodName returns the ONAME representing the method
// referenced by expression n, which must be a method selector,
// method expression, or method value.
-func methodExprName(n ir.Node) ir.Node {
- return ir.AsNode(methodExprFunc(n).Nname)
+func methodExprName(n ir.Node) *ir.Name {
+ name, _ := ir.AsNode(methodExprFunc(n).Nname).(*ir.Name)
+ return name
}
// MethodFunc is like MethodName, but returns the types.Field instead.
const tmpstringbufsize = 32
const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
-func walk(fn ir.Node) {
+func walk(fn *ir.Func) {
Curfn = fn
errorsBefore := base.Errors()
if base.Flag.W != 0 {
- s := fmt.Sprintf("\nbefore walk %v", Curfn.Func().Nname.Sym())
+ s := fmt.Sprintf("\nbefore walk %v", Curfn.Sym())
ir.DumpList(s, Curfn.Body())
}
lno := base.Pos
// Final typecheck for any unused variables.
- for i, ln := range fn.Func().Dcl {
+ for i, ln := range fn.Dcl {
if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) {
- ln = typecheck(ln, ctxExpr|ctxAssign)
- fn.Func().Dcl[i] = ln
+ ln = typecheck(ln, ctxExpr|ctxAssign).(*ir.Name)
+ fn.Dcl[i] = ln
}
}
// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
- for _, ln := range fn.Func().Dcl {
+ for _, ln := range fn.Dcl {
if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Name().Defn != nil && ln.Name().Defn.Op() == ir.OTYPESW && ln.Name().Used() {
ln.Name().Defn.Left().Name().SetUsed(true)
}
}
- for _, ln := range fn.Func().Dcl {
+ for _, ln := range fn.Dcl {
if ln.Op() != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Name().Used() {
continue
}
}
walkstmtlist(Curfn.Body().Slice())
if base.Flag.W != 0 {
- s := fmt.Sprintf("after walk %v", Curfn.Func().Nname.Sym())
+ s := fmt.Sprintf("after walk %v", Curfn.Sym())
ir.DumpList(s, Curfn.Body())
}
zeroResults()
heapmoves()
- if base.Flag.W != 0 && Curfn.Func().Enter.Len() > 0 {
- s := fmt.Sprintf("enter %v", Curfn.Func().Nname.Sym())
- ir.DumpList(s, Curfn.Func().Enter)
+ if base.Flag.W != 0 && Curfn.Enter.Len() > 0 {
+ s := fmt.Sprintf("enter %v", Curfn.Sym())
+ ir.DumpList(s, Curfn.Enter)
}
}
}
}
-func paramoutheap(fn ir.Node) bool {
- for _, ln := range fn.Func().Dcl {
+func paramoutheap(fn *ir.Func) bool {
+ for _, ln := range fn.Dcl {
switch ln.Class() {
case ir.PPARAMOUT:
if isParamStackCopy(ln) || ln.Name().Addrtaken() {
base.Errorf("case statement out of place")
case ir.ODEFER:
- Curfn.Func().SetHasDefer(true)
- Curfn.Func().NumDefers++
- if Curfn.Func().NumDefers > maxOpenDefers {
+ Curfn.SetHasDefer(true)
+ Curfn.NumDefers++
+ if Curfn.NumDefers > maxOpenDefers {
// Don't allow open-coded defers if there are more than
// 8 defers in the function, since we use a single
// byte to record active defers.
- Curfn.Func().SetOpenCodedDeferDisallowed(true)
+ Curfn.SetOpenCodedDeferDisallowed(true)
}
if n.Esc() != EscNever {
// If n.Esc is not EscNever, then this defer occurs in a loop,
// so open-coded defers cannot be used in this function.
- Curfn.Func().SetOpenCodedDeferDisallowed(true)
+ Curfn.SetOpenCodedDeferDisallowed(true)
}
fallthrough
case ir.OGO:
walkstmtlist(n.Rlist().Slice())
case ir.ORETURN:
- Curfn.Func().NumReturns++
+ Curfn.NumReturns++
if n.List().Len() == 0 {
break
}
// so that reorder3 can fix up conflicts
var rl []ir.Node
- for _, ln := range Curfn.Func().Dcl {
+ for _, ln := range Curfn.Dcl {
cl := ln.Class()
if cl == ir.PAUTO || cl == ir.PAUTOHEAP {
break
}
if cl == ir.PPARAMOUT {
+ var ln ir.Node = ln
if isParamStackCopy(ln) {
ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name().Heapaddr, nil), ctxExpr), nil)
}
fromType := n.Left().Type()
toType := n.Type()
- if !fromType.IsInterface() && !ir.IsBlank(Curfn.Func().Nname) { // skip unnamed functions (func _())
- markTypeUsedInInterface(fromType, Curfn.Func().LSym)
+ if !fromType.IsInterface() && !ir.IsBlank(Curfn.Nname) { // skip unnamed functions (func _())
+ markTypeUsedInInterface(fromType, Curfn.LSym)
}
// typeword generates the type word of the interface value.
func markUsedIfaceMethod(n ir.Node) {
ityp := n.Left().Left().Type()
tsym := typenamesym(ityp).Linksym()
- r := obj.Addrel(Curfn.Func().LSym)
+ r := obj.Addrel(Curfn.LSym)
r.Sym = tsym
// n.Left.Xoffset is the method index * Widthptr (the offset of code pointer
// in itab).
v = v.Name().Stackcopy
}
// Zero the stack location containing f.
- Curfn.Func().Enter.Append(ir.NodAt(Curfn.Pos(), ir.OAS, v, nil))
+ Curfn.Enter.Append(ir.NodAt(Curfn.Pos(), ir.OAS, v, nil))
}
}
nn := paramstoheap(Curfn.Type().Recvs())
nn = append(nn, paramstoheap(Curfn.Type().Params())...)
nn = append(nn, paramstoheap(Curfn.Type().Results())...)
- Curfn.Func().Enter.Append(nn...)
- base.Pos = Curfn.Func().Endlineno
- Curfn.Func().Exit.Append(returnsfromheap(Curfn.Type().Results())...)
+ Curfn.Enter.Append(nn...)
+ base.Pos = Curfn.Endlineno
+ Curfn.Exit.Append(returnsfromheap(Curfn.Type().Results())...)
base.Pos = lno
}
nptr2 := l2
- Curfn.Func().SetWBPos(n.Pos())
+ Curfn.SetWBPos(n.Pos())
// instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
fn := syslook("typedslicecopy")
hasPointers := elemtype.HasPointers()
if hasPointers {
clrname = "memclrHasPointers"
- Curfn.Func().SetWBPos(n.Pos())
+ Curfn.SetWBPos(n.Pos())
}
var clr ir.Nodes
//
func copyany(n ir.Node, init *ir.Nodes, runtimecall bool) ir.Node {
if n.Left().Type().Elem().HasPointers() {
- Curfn.Func().SetWBPos(n.Pos())
+ Curfn.SetWBPos(n.Pos())
fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem())
n.SetLeft(cheapexpr(n.Left(), init))
ptrL, lenL := backingArrayPtrLen(n.Left())
// (including global variables such as numImports - was issue #19028).
// Also need to check for reflect package itself (see Issue #38515).
if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) {
- Curfn.Func().SetReflectMethod(true)
+ Curfn.SetReflectMethod(true)
// The LSym is initialized at this point. We need to set the attribute on the LSym.
- Curfn.Func().LSym.Set(obj.AttrReflectMethod, true)
+ Curfn.LSym.Set(obj.AttrReflectMethod, true)
}
}
}
sym := tracksym(outer, field)
- if Curfn.Func().FieldTrack == nil {
- Curfn.Func().FieldTrack = make(map[*types.Sym]struct{})
+ if Curfn.FieldTrack == nil {
+ Curfn.FieldTrack = make(map[*types.Sym]struct{})
}
- Curfn.Func().FieldTrack[sym] = struct{}{}
+ Curfn.FieldTrack[sym] = struct{}{}
}
func candiscardlist(l ir.Nodes) bool {
funcbody()
- fn = typecheck(fn, ctxStmt)
+ typecheckFunc(fn)
typecheckslice(fn.Body().Slice(), ctxStmt)
xtop = append(xtop, fn)
call = ir.Nod(ir.OCALL, nil, nil)
- call.SetLeft(fn.Func().Nname)
+ call.SetLeft(fn.Nname)
call.PtrList().Set(n.List().Slice())
call = typecheck(call, ctxStmt)
call = walkexpr(call, init)
// checkPtr reports whether pointer checking should be enabled for
// function fn at a given level. See debugHelpFooter for defined
// levels.
-func checkPtr(fn ir.Node, level int) bool {
- return base.Debug.Checkptr >= level && fn.Func().Pragma&ir.NoCheckPtr == 0
+func checkPtr(fn *ir.Func, level int) bool {
+ return base.Debug.Checkptr >= level && fn.Pragma&ir.NoCheckPtr == 0
}
mode.Fprintf(s, "%v { %v }", n.Type(), n.Body())
return
}
- mode.Fprintf(s, "%v { %v }", n.Type(), n.Func().Decl.Body())
+ mode.Fprintf(s, "%v { %v }", n.Type(), n.Func().Body())
case OCOMPLIT:
if mode == FErr {
}
}
- if n.Op() == OCLOSURE && n.Func().Decl != nil && n.Func().Nname.Sym() != nil {
+ if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Nname.Sym() != nil {
mode.Fprintf(s, " fnName %v", n.Func().Nname.Sym())
}
if n.Sym() != nil && n.Op() != ONAME {
if n.Right() != nil {
mode.Fprintf(s, "%v", n.Right())
}
- if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Decl != nil && n.Func().Decl.Body().Len() != 0 {
+ if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Body().Len() != 0 {
indent(s)
// The function associated with a closure
- mode.Fprintf(s, "%v-clofunc%v", n.Op(), n.Func().Decl)
+ mode.Fprintf(s, "%v-clofunc%v", n.Op(), n.Func())
}
if n.Op() == ODCLFUNC && n.Func() != nil && n.Func().Dcl != nil && len(n.Func().Dcl) != 0 {
indent(s)
// The dcls for a func or closure
- mode.Fprintf(s, "%v-dcl%v", n.Op(), AsNodes(n.Func().Dcl))
+ mode.Fprintf(s, "%v-dcl%v", n.Op(), asNameNodes(n.Func().Dcl))
}
if n.List().Len() != 0 {
indent(s)
}
}
+// asNameNodes copies list to a new Nodes.
+// The copy is shallow: the *Name pointers are shared with list, not cloned,
+// and the loop allocates once per element via Append.
+// It should only be called in debug formatting and other low-performance contexts.
+func asNameNodes(list []*Name) Nodes {
+ var ns Nodes
+ for _, n := range list {
+ ns.Append(n)
+ }
+ return ns
+}
+
// "%S" suppresses qualifying with package
func symFormat(s *types.Sym, f fmt.State, verb rune, mode FmtMode) {
switch verb {
body Nodes
iota int64
- Nname Node // ONAME node
- Decl Node // ODCLFUNC node
- OClosure Node // OCLOSURE node
+ Nname *Name // ONAME node
+ OClosure Node // OCLOSURE node
Shortname *types.Sym
Exit Nodes
// ONAME nodes for all params/locals for this func/closure, does NOT
// include closurevars until transformclosure runs.
- Dcl []Node
+ Dcl []*Name
- ClosureEnter Nodes // list of ONAME nodes of captured variables
- ClosureType Node // closure representation type
- ClosureCalled bool // closure is only immediately called
- ClosureVars Nodes // closure params; each has closurevar set
+ ClosureEnter Nodes // list of ONAME nodes (or OADDR-of-ONAME nodes, for output parameters) of captured variables
+ ClosureType Node // closure representation type
+ ClosureVars []*Name // closure params; each has closurevar set
// Parents records the parent scope of each scope within a
// function. The root scope (0) has no parent, so the i'th
// Marks records scope boundary changes.
Marks []Mark
- // Closgen tracks how many closures have been generated within
- // this function. Used by closurename for creating unique
- // function names.
- Closgen int
-
FieldTrack map[*types.Sym]struct{}
DebugInfo interface{}
LSym *obj.LSym
Inl *Inline
+ // Closgen tracks how many closures have been generated within
+ // this function. Used by closurename for creating unique
+ // function names.
+ Closgen int32
+
Label int32 // largest auto-generated label in this function
Endlineno src.XPos
Pragma PragmaFlag // go:xxx function annotations
flags bitset16
- NumDefers int // number of defer calls in the function
- NumReturns int // number of explicit returns in the function
+ NumDefers int32 // number of defer calls in the function
+ NumReturns int32 // number of explicit returns in the function
// nwbrCalls records the LSyms of functions called by this
// function for go:nowritebarrierrec analysis. Only filled in
f := new(Func)
f.pos = pos
f.op = ODCLFUNC
- f.Decl = f
f.iota = -1
return f
}
Cost int32 // heuristic cost of inlining this function
// Copies of Func.Dcl and Nbody for use during inlining.
- Dcl []Node
+ Dcl []*Name
Body []Node
}
funcExportInline // include inline body in export data
funcInstrumentBody // add race/msan instrumentation during SSA construction
funcOpenCodedDeferDisallowed // can't do open-coded defers
+ funcClosureCalled // closure is only immediately called
)
type SymAndPos struct {
func (f *Func) ExportInline() bool { return f.flags&funcExportInline != 0 }
func (f *Func) InstrumentBody() bool { return f.flags&funcInstrumentBody != 0 }
func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 }
+
+// ClosureCalled reports whether the closure is only immediately called.
+// It replaces the former ClosureCalled bool field, now packed into flags.
+func (f *Func) ClosureCalled() bool { return f.flags&funcClosureCalled != 0 }
func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) }
func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) }
func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInline, b) }
func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) }
func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
+
+// SetClosureCalled records whether the closure is only immediately called.
+func (f *Func) SetClosureCalled(b bool) { f.flags.set(funcClosureCalled, b) }
func (f *Func) SetWBPos(pos src.XPos) {
if base.Debug.WB != 0 {
// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
// For a closure var, the ONAME node of the outer captured variable
Defn Node
- // The ODCLFUNC node (for a static function/method or a closure) in which
- // local variable or param is declared.
- Curfn Node
+
+ // The function, method, or closure in which local variable or param is declared.
+ Curfn *Func
+
// Unique number for ONAME nodes within a function. Function outputs
// (results) are numbered starting at one, followed by function inputs
// (parameters), and then local variables. Vargen is used to distinguish
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {Func{}, 180, 320},
- {Name{}, 132, 232},
+ {Func{}, 172, 296},
+ {Name{}, 128, 224},
{node{}, 84, 144},
}