// func sym(p *T, h uintptr) uintptr
tfn := ir.Nod(ir.OTFUNC, nil, nil)
- tfn.List.Set2(
+ tfn.PtrList().Set2(
namedfield("p", types.NewPtr(t)),
namedfield("h", types.Types[types.TUINTPTR]),
)
- tfn.Rlist.Set1(anonfield(types.Types[types.TUINTPTR]))
+ tfn.PtrRlist().Set1(anonfield(types.Types[types.TUINTPTR]))
fn := dclfunc(sym, tfn)
- np := ir.AsNode(tfn.Type.Params().Field(0).Nname)
- nh := ir.AsNode(tfn.Type.Params().Field(1).Nname)
+ np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
+ nh := ir.AsNode(tfn.Type().Params().Field(1).Nname)
switch t.Etype {
case types.TARRAY:
n := ir.Nod(ir.ORANGE, nil, ir.Nod(ir.ODEREF, np, nil))
ni := NewName(lookup("i"))
- ni.Type = types.Types[types.TINT]
- n.List.Set1(ni)
+ ni.SetType(types.Types[types.TINT])
+ n.PtrList().Set1(ni)
n.SetColas(true)
- colasdefn(n.List.Slice(), n)
- ni = n.List.First()
+ colasdefn(n.List().Slice(), n)
+ ni = n.List().First()
// h = hashel(&p[i], h)
call := ir.Nod(ir.OCALL, hashel, nil)
nx := ir.Nod(ir.OINDEX, np, ni)
nx.SetBounded(true)
na := ir.Nod(ir.OADDR, nx, nil)
- call.List.Append(na)
- call.List.Append(nh)
- n.Nbody.Append(ir.Nod(ir.OAS, nh, call))
+ call.PtrList().Append(na)
+ call.PtrList().Append(nh)
+ n.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
- fn.Nbody.Append(n)
+ fn.PtrBody().Append(n)
case types.TSTRUCT:
// Walk the struct using memhash for runs of AMEM
call := ir.Nod(ir.OCALL, hashel, nil)
nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
na := ir.Nod(ir.OADDR, nx, nil)
- call.List.Append(na)
- call.List.Append(nh)
- fn.Nbody.Append(ir.Nod(ir.OAS, nh, call))
+ call.PtrList().Append(na)
+ call.PtrList().Append(nh)
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
i++
continue
}
call := ir.Nod(ir.OCALL, hashel, nil)
nx := nodSym(ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
na := ir.Nod(ir.OADDR, nx, nil)
- call.List.Append(na)
- call.List.Append(nh)
- call.List.Append(nodintconst(size))
- fn.Nbody.Append(ir.Nod(ir.OAS, nh, call))
+ call.PtrList().Append(na)
+ call.PtrList().Append(nh)
+ call.PtrList().Append(nodintconst(size))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nh, call))
i = next
}
}
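// As a sketch (hypothetical type, not actual compiler output), for
//
//	type T struct {
//		s    string
//		a, b int64
//	}
//
// the generated hasher has roughly this shape, with the a/b run of
// plain memory collapsed into a single sized memhash call:
//
//	func hash(p *T, h uintptr) uintptr {
//		h = strhash(&p.s, h)
//		h = memhash(&p.a, h, 16)
//		return h
//	}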
r := ir.Nod(ir.ORETURN, nil, nil)
- r.List.Append(nh)
- fn.Nbody.Append(r)
+ r.PtrList().Append(nh)
+ fn.PtrBody().Append(r)
if base.Flag.LowerR != 0 {
- ir.DumpList("genhash body", fn.Nbody)
+ ir.DumpList("genhash body", fn.Body())
}
funcbody()
- fn.Func.SetDupok(true)
+ fn.Func().SetDupok(true)
fn = typecheck(fn, ctxStmt)
Curfn = fn
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
Curfn = nil
if base.Debug.DclStack != 0 {
testdclstack()
}
- fn.Func.SetNilCheckDisabled(true)
+ fn.Func().SetNilCheckDisabled(true)
xtop = append(xtop, fn)
// Build closure. It doesn't close over any variables, so
// it shouldn't be marked as a closure.
n := NewName(sym)
setNodeNameFunc(n)
- n.Type = functype(nil, []*ir.Node{
+ n.SetType(functype(nil, []*ir.Node{
anonfield(types.NewPtr(t)),
anonfield(types.Types[types.TUINTPTR]),
}, []*ir.Node{
anonfield(types.Types[types.TUINTPTR]),
- })
+ }))
return n
}
// func sym(p, q *T) bool
tfn := ir.Nod(ir.OTFUNC, nil, nil)
- tfn.List.Set2(
+ tfn.PtrList().Set2(
namedfield("p", types.NewPtr(t)),
namedfield("q", types.NewPtr(t)),
)
- tfn.Rlist.Set1(namedfield("r", types.Types[types.TBOOL]))
+ tfn.PtrRlist().Set1(namedfield("r", types.Types[types.TBOOL]))
fn := dclfunc(sym, tfn)
- np := ir.AsNode(tfn.Type.Params().Field(0).Nname)
- nq := ir.AsNode(tfn.Type.Params().Field(1).Nname)
- nr := ir.AsNode(tfn.Type.Results().Field(0).Nname)
+ np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
+ nq := ir.AsNode(tfn.Type().Params().Field(1).Nname)
+ nr := ir.AsNode(tfn.Type().Results().Field(0).Nname)
// Label to jump to if an equality test fails.
neq := autolabel(".neq")
// pi := p[i]
pi := ir.Nod(ir.OINDEX, np, i)
pi.SetBounded(true)
- pi.Type = t.Elem()
+ pi.SetType(t.Elem())
// qi := q[i]
qi := ir.Nod(ir.OINDEX, nq, i)
qi.SetBounded(true)
- qi.Type = t.Elem()
+ qi.SetType(t.Elem())
return eq(pi, qi)
}
for i := int64(0); i < nelem; i++ {
// if check {} else { goto neq }
nif := ir.Nod(ir.OIF, checkIdx(nodintconst(i)), nil)
- nif.Rlist.Append(nodSym(ir.OGOTO, nil, neq))
- fn.Nbody.Append(nif)
+ nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
+ fn.PtrBody().Append(nif)
}
if last {
- fn.Nbody.Append(ir.Nod(ir.OAS, nr, checkIdx(nodintconst(nelem))))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, checkIdx(nodintconst(nelem))))
}
} else {
// Generate a for loop.
cond := ir.Nod(ir.OLT, i, nodintconst(nelem))
post := ir.Nod(ir.OAS, i, ir.Nod(ir.OADD, i, nodintconst(1)))
loop := ir.Nod(ir.OFOR, cond, post)
- loop.Ninit.Append(init)
+ loop.PtrInit().Append(init)
// if eq(pi, qi) {} else { goto neq }
nif := ir.Nod(ir.OIF, checkIdx(i), nil)
- nif.Rlist.Append(nodSym(ir.OGOTO, nil, neq))
- loop.Nbody.Append(nif)
- fn.Nbody.Append(loop)
+ nif.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
+ loop.PtrBody().Append(nif)
+ fn.PtrBody().Append(loop)
if last {
- fn.Nbody.Append(ir.Nod(ir.OAS, nr, nodbool(true)))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true)))
}
}
}
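// Sketch of the two shapes produced for a [4]T array, with eqT
// standing in for whatever comparison checkIdx generates per element:
//
// unrolled:
//
//	if eqT(p[0], q[0]) {} else { goto neq }
//	if eqT(p[1], q[1]) {} else { goto neq }
//	if eqT(p[2], q[2]) {} else { goto neq }
//	r = eqT(p[3], q[3])
//
// loop:
//
//	for i := 0; i < 4; i++ {
//		if eqT(p[i], q[i]) {} else { goto neq }
//	}
//	r = true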
var flatConds []*ir.Node
for _, c := range conds {
isCall := func(n *ir.Node) bool {
- return n.Op == ir.OCALL || n.Op == ir.OCALLFUNC
+ return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
}
sort.SliceStable(c, func(i, j int) bool {
return !isCall(c[i]) && isCall(c[j])
})
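// E.g. (a sketch) for
//
//	type T struct {
//		s string
//		n int
//	}
//
// the inline word comparison p.n == q.n sorts ahead of the runtime
// call that compares the string contents, so a mismatch in n fails
// fast without making the call.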
if len(flatConds) == 0 {
- fn.Nbody.Append(ir.Nod(ir.OAS, nr, nodbool(true)))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(true)))
} else {
for _, c := range flatConds[:len(flatConds)-1] {
// if cond {} else { goto neq }
n := ir.Nod(ir.OIF, c, nil)
- n.Rlist.Append(nodSym(ir.OGOTO, nil, neq))
- fn.Nbody.Append(n)
+ n.PtrRlist().Append(nodSym(ir.OGOTO, nil, neq))
+ fn.PtrBody().Append(n)
}
- fn.Nbody.Append(ir.Nod(ir.OAS, nr, flatConds[len(flatConds)-1]))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, flatConds[len(flatConds)-1]))
}
}
// ret:
// return
ret := autolabel(".ret")
- fn.Nbody.Append(nodSym(ir.OLABEL, nil, ret))
- fn.Nbody.Append(ir.Nod(ir.ORETURN, nil, nil))
+ fn.PtrBody().Append(nodSym(ir.OLABEL, nil, ret))
+ fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil))
// neq:
// r = false
// return (or goto ret)
- fn.Nbody.Append(nodSym(ir.OLABEL, nil, neq))
- fn.Nbody.Append(ir.Nod(ir.OAS, nr, nodbool(false)))
+ fn.PtrBody().Append(nodSym(ir.OLABEL, nil, neq))
+ fn.PtrBody().Append(ir.Nod(ir.OAS, nr, nodbool(false)))
if EqCanPanic(t) || hasCall(fn) {
// Epilogue is large, so share it with the equal case.
- fn.Nbody.Append(nodSym(ir.OGOTO, nil, ret))
+ fn.PtrBody().Append(nodSym(ir.OGOTO, nil, ret))
} else {
// Epilogue is small, so don't bother sharing.
- fn.Nbody.Append(ir.Nod(ir.ORETURN, nil, nil))
+ fn.PtrBody().Append(ir.Nod(ir.ORETURN, nil, nil))
}
// TODO(khr): the epilogue size detection condition above isn't perfect.
// We should really do a generic CL that shares epilogues across
// the board. See #24936.
if base.Flag.LowerR != 0 {
- ir.DumpList("geneq body", fn.Nbody)
+ ir.DumpList("geneq body", fn.Body())
}
funcbody()
- fn.Func.SetDupok(true)
+ fn.Func().SetDupok(true)
fn = typecheck(fn, ctxStmt)
Curfn = fn
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
Curfn = nil
if base.Debug.DclStack != 0 {
// We are comparing a struct or an array,
// neither of which can be nil, and our comparisons
// are shallow.
- fn.Func.SetNilCheckDisabled(true)
+ fn.Func().SetNilCheckDisabled(true)
xtop = append(xtop, fn)
// Generate a closure which points at the function we just generated.
}
func hasCall(n *ir.Node) bool {
- if n.Op == ir.OCALL || n.Op == ir.OCALLFUNC {
+ if n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC {
return true
}
- if n.Left != nil && hasCall(n.Left) {
+ if n.Left() != nil && hasCall(n.Left()) {
return true
}
- if n.Right != nil && hasCall(n.Right) {
+ if n.Right() != nil && hasCall(n.Right()) {
return true
}
- for _, x := range n.Ninit.Slice() {
+ for _, x := range n.Init().Slice() {
if hasCall(x) {
return true
}
}
- for _, x := range n.Nbody.Slice() {
+ for _, x := range n.Body().Slice() {
if hasCall(x) {
return true
}
}
- for _, x := range n.List.Slice() {
+ for _, x := range n.List().Slice() {
if hasCall(x) {
return true
}
}
- for _, x := range n.Rlist.Slice() {
+ for _, x := range n.Rlist().Slice() {
if hasCall(x) {
return true
}
fn := syslook("memequal")
fn = substArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
call := ir.Nod(ir.OCALL, fn, nil)
- call.List.Append(sptr, tptr, ir.Copy(slen))
+ call.PtrList().Append(sptr, tptr, ir.Copy(slen))
call = typecheck(call, ctxExpr|ctxMultiOK)
cmp := ir.Nod(ir.OEQ, slen, tlen)
cmp = typecheck(cmp, ctxExpr)
- cmp.Type = types.Types[types.TBOOL]
+ cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
func eqinterface(s, t *ir.Node) (eqtab, eqdata *ir.Node) {
- if !types.Identical(s.Type, t.Type) {
- base.Fatalf("eqinterface %v %v", s.Type, t.Type)
+ if !types.Identical(s.Type(), t.Type()) {
+ base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
}
// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
var fn *ir.Node
- if s.Type.IsEmptyInterface() {
+ if s.Type().IsEmptyInterface() {
fn = syslook("efaceeq")
} else {
fn = syslook("ifaceeq")
ttab := ir.Nod(ir.OITAB, t, nil)
sdata := ir.Nod(ir.OIDATA, s, nil)
tdata := ir.Nod(ir.OIDATA, t, nil)
- sdata.Type = types.Types[types.TUNSAFEPTR]
- tdata.Type = types.Types[types.TUNSAFEPTR]
+ sdata.SetType(types.Types[types.TUNSAFEPTR])
+ tdata.SetType(types.Types[types.TUNSAFEPTR])
sdata.SetTypecheck(1)
tdata.SetTypecheck(1)
call := ir.Nod(ir.OCALL, fn, nil)
- call.List.Append(stab, sdata, tdata)
+ call.PtrList().Append(stab, sdata, tdata)
call = typecheck(call, ctxExpr|ctxMultiOK)
cmp := ir.Nod(ir.OEQ, stab, ttab)
cmp = typecheck(cmp, ctxExpr)
- cmp.Type = types.Types[types.TBOOL]
+ cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
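// Caller-side sketch (assuming the walk of an interface OEQ): the two
// results must be combined with a short-circuiting &&, so the
// ifaceeq/efaceeq call in eqdata only runs once the type words match:
//
//	eqtab, eqdata := eqinterface(l, r)
//	cmp := ir.Nod(ir.OANDAND, eqtab, eqdata)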
nx = typecheck(nx, ctxExpr)
ny = typecheck(ny, ctxExpr)
- fn, needsize := eqmemfunc(size, nx.Type.Elem())
+ fn, needsize := eqmemfunc(size, nx.Type().Elem())
call := ir.Nod(ir.OCALL, fn, nil)
- call.List.Append(nx)
- call.List.Append(ny)
+ call.PtrList().Append(nx)
+ call.PtrList().Append(ny)
if needsize {
- call.List.Append(nodintconst(size))
+ call.PtrList().Append(nodintconst(size))
}
return call
// NOTE(rsc): This comment may be stale.
// It's possible the ordering has changed and this is
// now the common case. I'm not sure.
- if n.Name.Param.Stackcopy != nil {
- n.Name.Param.Stackcopy.Xoffset = o
- n.Xoffset = 0
+ if n.Name().Param.Stackcopy != nil {
+ n.Name().Param.Stackcopy.SetOffset(o)
+ n.SetOffset(0)
} else {
- n.Xoffset = o
+ n.SetOffset(o)
}
}
}
*path = append(*path, t)
- if p := ir.AsNode(t.Nod).Name.Param; p != nil && findTypeLoop(p.Ntype.Type, path) {
+ if p := ir.AsNode(t.Nod).Name().Param; p != nil && findTypeLoop(p.Ntype.Type(), path) {
return true
}
*path = (*path)[:len(*path)-1]
lno := base.Pos
if ir.AsNode(t.Nod) != nil {
- base.Pos = ir.AsNode(t.Nod).Pos
+ base.Pos = ir.AsNode(t.Nod).Pos()
}
t.Width = -2
// markObject visits a reachable object.
func (p *exporter) markObject(n *ir.Node) {
- if n.Op == ir.ONAME && n.Class() == ir.PFUNC {
+ if n.Op() == ir.ONAME && n.Class() == ir.PFUNC {
inlFlood(n)
}
- p.markType(n.Type)
+ p.markType(n.Type())
}
// markType recursively visits types reachable from t to identify
)
func npos(pos src.XPos, n *ir.Node) *ir.Node {
- n.Pos = pos
+ n.SetPos(pos)
return n
}
ntype := p.typeExpr(expr.Type)
dcl := p.nod(expr, ir.ODCLFUNC, nil, nil)
- fn := dcl.Func
+ fn := dcl.Func()
fn.SetIsHiddenClosure(Curfn != nil)
- fn.Nname = newfuncnamel(p.pos(expr), ir.BlankNode.Sym, fn) // filled in by typecheckclosure
- fn.Nname.Name.Param.Ntype = xtype
- fn.Nname.Name.Defn = dcl
+ fn.Nname = newfuncnamel(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure
+ fn.Nname.Name().Param.Ntype = xtype
+ fn.Nname.Name().Defn = dcl
clo := p.nod(expr, ir.OCLOSURE, nil, nil)
- clo.Func = fn
+ clo.SetFunc(fn)
fn.ClosureType = ntype
fn.OClosure = clo
// make the list of pointers for the closure call.
for _, v := range fn.ClosureVars.Slice() {
// Unlink from v1; see comment in syntax.go type Param for these fields.
- v1 := v.Name.Defn
- v1.Name.Param.Innermost = v.Name.Param.Outer
+ v1 := v.Name().Defn
+ v1.Name().Param.Innermost = v.Name().Param.Outer
// If the closure usage of v is not dense,
// we need to make it dense; now that we're out
// obtains f3's v, creating it if necessary (as it is in the example).
//
// capturevars will decide whether to use v directly or &v.
- v.Name.Param.Outer = oldname(v.Sym)
+ v.Name().Param.Outer = oldname(v.Sym())
}
return clo
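// Sketch of the chain oldname maintains: given
//
//	v := 0
//	f1 := func() {
//		f2 := func() { _ = v }
//		_ = v
//	}
//
// each of f1 and f2 gets its own closure-variable copy of v.
// Param.Innermost points at the copy for the closure currently being
// compiled, and Param.Outer links each copy to the one in the
// enclosing frame; the loop above unwinds exactly that chain.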
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
func typecheckclosure(clo *ir.Node, top int) {
- fn := clo.Func
+ fn := clo.Func()
dcl := fn.Decl
// Set current associated iota value, so iota can be used inside
// function in ConstSpec, see issue #22344
}
fn.ClosureType = typecheck(fn.ClosureType, ctxType)
- clo.Type = fn.ClosureType.Type
+ clo.SetType(fn.ClosureType.Type())
fn.ClosureCalled = top&ctxCallee != 0
// Do not typecheck dcl twice, otherwise, we will end up pushing
}
for _, ln := range fn.ClosureVars.Slice() {
- n := ln.Name.Defn
- if !n.Name.Captured() {
- n.Name.SetCaptured(true)
- if n.Name.Decldepth == 0 {
+ n := ln.Name().Defn
+ if !n.Name().Captured() {
+ n.Name().SetCaptured(true)
+ if n.Name().Decldepth == 0 {
base.Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
}
// Ignore assignments to the variable in straightline code
// preceding the first capturing by a closure.
- if n.Name.Decldepth == decldepth {
- n.Name.SetAssigned(false)
+ if n.Name().Decldepth == decldepth {
+ n.Name().SetAssigned(false)
}
}
}
- fn.Nname.Sym = closurename(Curfn)
+ fn.Nname.SetSym(closurename(Curfn))
setNodeNameFunc(fn.Nname)
dcl = typecheck(dcl, ctxStmt)
// At top level (in a variable initialization: curfn==nil) we're not
// ready to type check code yet; we'll check it later, because the
// underlying closure function we create is added to xtop.
- if Curfn != nil && clo.Type != nil {
+ if Curfn != nil && clo.Type() != nil {
oldfn := Curfn
Curfn = dcl
olddd := decldepth
decldepth = 1
- typecheckslice(dcl.Nbody.Slice(), ctxStmt)
+ typecheckslice(dcl.Body().Slice(), ctxStmt)
decldepth = olddd
Curfn = oldfn
}
gen := &globClosgen
if outerfunc != nil {
- if outerfunc.Func.OClosure != nil {
+ if outerfunc.Func().OClosure != nil {
prefix = ""
}
// There may be multiple functions named "_". In those
// cases, we can't use their individual Closgens as it
// would lead to name clashes.
- if !ir.IsBlank(outerfunc.Func.Nname) {
- gen = &outerfunc.Func.Closgen
+ if !ir.IsBlank(outerfunc.Func().Nname) {
+ gen = &outerfunc.Func().Closgen
}
}
// after capturing (effectively constant).
func capturevars(dcl *ir.Node) {
lno := base.Pos
- base.Pos = dcl.Pos
- fn := dcl.Func
+ base.Pos = dcl.Pos()
+ fn := dcl.Func()
cvars := fn.ClosureVars.Slice()
out := cvars[:0]
for _, v := range cvars {
- if v.Type == nil {
+ if v.Type() == nil {
// If v.Type is nil, it means v looked like it
// was going to be used in the closure, but
// isn't. This happens in struct literals like
// type check the & of closed variables outside the closure,
// so that the outer frame also grabs them and knows they escape.
- dowidth(v.Type)
+ dowidth(v.Type())
- outer := v.Name.Param.Outer
- outermost := v.Name.Defn
+ outer := v.Name().Param.Outer
+ outermost := v.Name().Defn
// out parameters will be assigned to implicitly upon return.
- if outermost.Class() != ir.PPARAMOUT && !outermost.Name.Addrtaken() && !outermost.Name.Assigned() && v.Type.Width <= 128 {
- v.Name.SetByval(true)
+ if outermost.Class() != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 {
+ v.Name().SetByval(true)
} else {
- outermost.Name.SetAddrtaken(true)
+ outermost.Name().SetAddrtaken(true)
outer = ir.Nod(ir.OADDR, outer, nil)
}
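// Sketch of the decision above:
//
//	a := 1
//	b := 2
//	f := func() int { b++; return a + b }
//
// a is small, never addressed, and never reassigned, so it is
// captured by value; b is assigned inside the closure, so it is
// captured by reference through &b.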
if base.Flag.LowerM > 1 {
var name *types.Sym
- if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
- name = v.Name.Curfn.Func.Nname.Sym
+ if v.Name().Curfn != nil && v.Name().Curfn.Func().Nname != nil {
+ name = v.Name().Curfn.Func().Nname.Sym()
}
how := "ref"
- if v.Name.Byval() {
+ if v.Name().Byval() {
how = "value"
}
- base.WarnfAt(v.Pos, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Name.Addrtaken(), outermost.Name.Assigned(), int32(v.Type.Width))
+ base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width))
}
outer = typecheck(outer, ctxExpr)
// It transforms closure bodies to properly reference captured variables.
func transformclosure(dcl *ir.Node) {
lno := base.Pos
- base.Pos = dcl.Pos
- fn := dcl.Func
+ base.Pos = dcl.Pos()
+ fn := dcl.Func()
if fn.ClosureCalled {
// If the closure is directly called, we transform it to a plain function call
var params []*types.Field
var decls []*ir.Node
for _, v := range fn.ClosureVars.Slice() {
- if !v.Name.Byval() {
+ if !v.Name().Byval() {
// If v of type T is captured by reference,
// we introduce function param &v *T
// and v remains PAUTOHEAP with &v heapaddr
// (accesses will implicitly deref &v).
- addr := NewName(lookup("&" + v.Sym.Name))
- addr.Type = types.NewPtr(v.Type)
- v.Name.Param.Heapaddr = addr
+ addr := NewName(lookup("&" + v.Sym().Name))
+ addr.SetType(types.NewPtr(v.Type()))
+ v.Name().Param.Heapaddr = addr
v = addr
}
v.SetClass(ir.PPARAM)
decls = append(decls, v)
- fld := types.NewField(src.NoXPos, v.Sym, v.Type)
+ fld := types.NewField(src.NoXPos, v.Sym(), v.Type())
fld.Nname = ir.AsTypesNode(v)
params = append(params, fld)
}
if len(params) > 0 {
// Prepend params and decls.
- f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...))
+ f.Type().Params().SetFields(append(params, f.Type().Params().FieldSlice()...))
fn.Dcl = append(decls, fn.Dcl...)
}
- dowidth(f.Type)
- dcl.Type = f.Type // update type of ODCLFUNC
+ dowidth(f.Type())
+ dcl.SetType(f.Type()) // update type of ODCLFUNC
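// Net effect, as a sketch: a directly called closure
//
//	func(x int) int { return x + v }(1)
//
// becomes a plain call with the captured variables prepended to the
// argument list, f(v, 1) (or f(&v, 1) for by-reference captures), so
// no closure object is allocated.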
} else {
// The closure is not called, so it is going to stay as closure.
var body []*ir.Node
// cv refers to the field inside of closure OSTRUCTLIT.
cv := ir.Nod(ir.OCLOSUREVAR, nil, nil)
- cv.Type = v.Type
- if !v.Name.Byval() {
- cv.Type = types.NewPtr(v.Type)
+ cv.SetType(v.Type())
+ if !v.Name().Byval() {
+ cv.SetType(types.NewPtr(v.Type()))
}
- offset = Rnd(offset, int64(cv.Type.Align))
- cv.Xoffset = offset
- offset += cv.Type.Width
+ offset = Rnd(offset, int64(cv.Type().Align))
+ cv.SetOffset(offset)
+ offset += cv.Type().Width
- if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) {
+ if v.Name().Byval() && v.Type().Width <= int64(2*Widthptr) {
// If it is a small variable captured by value, downgrade it to PAUTO.
v.SetClass(ir.PAUTO)
fn.Dcl = append(fn.Dcl, v)
} else {
// Declare variable holding addresses taken from closure
// and initialize in entry prologue.
- addr := NewName(lookup("&" + v.Sym.Name))
- addr.Type = types.NewPtr(v.Type)
+ addr := NewName(lookup("&" + v.Sym().Name))
+ addr.SetType(types.NewPtr(v.Type()))
addr.SetClass(ir.PAUTO)
- addr.Name.SetUsed(true)
- addr.Name.Curfn = dcl
+ addr.Name().SetUsed(true)
+ addr.Name().Curfn = dcl
fn.Dcl = append(fn.Dcl, addr)
- v.Name.Param.Heapaddr = addr
- if v.Name.Byval() {
+ v.Name().Param.Heapaddr = addr
+ if v.Name().Byval() {
cv = ir.Nod(ir.OADDR, cv, nil)
}
body = append(body, ir.Nod(ir.OAS, addr, cv))
// hasemptycvars reports whether closure clo has an
// empty list of captured vars.
func hasemptycvars(clo *ir.Node) bool {
- return clo.Func.ClosureVars.Len() == 0
+ return clo.Func().ClosureVars.Len() == 0
}
// closuredebugruntimecheck applies boilerplate checks for debug flags
// and compiling runtime
func closuredebugruntimecheck(clo *ir.Node) {
if base.Debug.Closure > 0 {
- if clo.Esc == EscHeap {
- base.WarnfAt(clo.Pos, "heap closure, captured vars = %v", clo.Func.ClosureVars)
+ if clo.Esc() == EscHeap {
+ base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func().ClosureVars)
} else {
- base.WarnfAt(clo.Pos, "stack closure, captured vars = %v", clo.Func.ClosureVars)
+ base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func().ClosureVars)
}
}
- if base.Flag.CompilingRuntime && clo.Esc == EscHeap {
- base.ErrorfAt(clo.Pos, "heap-allocated closure, not allowed in runtime")
+ if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
+ base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
}
}
fields := []*ir.Node{
namedfield(".F", types.Types[types.TUINTPTR]),
}
- for _, v := range clo.Func.ClosureVars.Slice() {
- typ := v.Type
- if !v.Name.Byval() {
+ for _, v := range clo.Func().ClosureVars.Slice() {
+ typ := v.Type()
+ if !v.Name().Byval() {
typ = types.NewPtr(typ)
}
- fields = append(fields, symfield(v.Sym, typ))
+ fields = append(fields, symfield(v.Sym(), typ))
}
typ := tostruct(fields)
typ.SetNoalg(true)
}
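// Sketch: for a closure capturing s (by value) and n (by reference),
// closureType returns roughly
//
//	struct {
//		.F uintptr
//		s  string
//		n  *int
//	}
//
// which matches the OSTRUCTLIT that walkclosure builds below.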
func walkclosure(clo *ir.Node, init *ir.Nodes) *ir.Node {
- fn := clo.Func
+ fn := clo.Func()
// If no closure vars, don't bother wrapping.
if hasemptycvars(clo) {
if base.Debug.Closure > 0 {
- base.WarnfAt(clo.Pos, "closure converted to global")
+ base.WarnfAt(clo.Pos(), "closure converted to global")
}
return fn.Nname
}
typ := closureType(clo)
clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ))
- clos.Esc = clo.Esc
- clos.List.Set(append([]*ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
+ clos.SetEsc(clo.Esc())
+ clos.PtrList().Set(append([]*ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
clos = ir.Nod(ir.OADDR, clos, nil)
- clos.Esc = clo.Esc
+ clos.SetEsc(clo.Esc())
// Force type conversion from *struct to the func type.
- clos = convnop(clos, clo.Type)
+ clos = convnop(clos, clo.Type())
// non-escaping temp to use, if any.
if x := prealloc[clo]; x != nil {
- if !types.Identical(typ, x.Type) {
+ if !types.Identical(typ, x.Type()) {
panic("closure type does not match order's assigned type")
}
- clos.Left.Right = x
+ clos.Left().SetRight(x)
delete(prealloc, clo)
}
}
func typecheckpartialcall(dot *ir.Node, sym *types.Sym) {
- switch dot.Op {
+ switch dot.Op() {
case ir.ODOTINTER, ir.ODOTMETH:
break
}
// Create top-level function.
- dcl := makepartialcall(dot, dot.Type, sym)
- dcl.Func.SetWrapper(true)
- dot.Op = ir.OCALLPART
- dot.Right = NewName(sym)
- dot.Type = dcl.Type
- dot.Func = dcl.Func
+ dcl := makepartialcall(dot, dot.Type(), sym)
+ dcl.Func().SetWrapper(true)
+ dot.SetOp(ir.OCALLPART)
+ dot.SetRight(NewName(sym))
+ dot.SetType(dcl.Type())
+ dot.SetFunc(dcl.Func())
dot.SetOpt(nil) // clear types.Field from ODOTMETH
}
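// Sketch: for
//
//	var b bytes.Buffer
//	f := b.Len
//
// the ODOTMETH node b.Len becomes an OCALLPART referencing the
// generated wrapper (a symbol roughly named "Len-fm"), and f's type
// is func() int.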
// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node {
- rcvrtype := dot.Left.Type
+ rcvrtype := dot.Left().Type()
sym := methodSymSuffix(rcvrtype, meth, "-fm")
if sym.Uniq() {
// case. See issue 29389.
tfn := ir.Nod(ir.OTFUNC, nil, nil)
- tfn.List.Set(structargs(t0.Params(), true))
- tfn.Rlist.Set(structargs(t0.Results(), false))
+ tfn.PtrList().Set(structargs(t0.Params(), true))
+ tfn.PtrRlist().Set(structargs(t0.Results(), false))
dcl := dclfunc(sym, tfn)
- fn := dcl.Func
+ fn := dcl.Func()
fn.SetDupok(true)
fn.SetNeedctxt(true)
- tfn.Type.SetPkg(t0.Pkg())
+ tfn.Type().SetPkg(t0.Pkg())
// Declare and initialize variable holding receiver.
cv := ir.Nod(ir.OCLOSUREVAR, nil, nil)
- cv.Type = rcvrtype
- cv.Xoffset = Rnd(int64(Widthptr), int64(cv.Type.Align))
+ cv.SetType(rcvrtype)
+ cv.SetOffset(Rnd(int64(Widthptr), int64(cv.Type().Align)))
ptr := NewName(lookup(".this"))
declare(ptr, ir.PAUTO)
- ptr.Name.SetUsed(true)
+ ptr.Name().SetUsed(true)
var body []*ir.Node
if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
- ptr.Type = rcvrtype
+ ptr.SetType(rcvrtype)
body = append(body, ir.Nod(ir.OAS, ptr, cv))
} else {
- ptr.Type = types.NewPtr(rcvrtype)
+ ptr.SetType(types.NewPtr(rcvrtype))
body = append(body, ir.Nod(ir.OAS, ptr, ir.Nod(ir.OADDR, cv, nil)))
}
call := ir.Nod(ir.OCALL, nodSym(ir.OXDOT, ptr, meth), nil)
- call.List.Set(paramNnames(tfn.Type))
- call.SetIsDDD(tfn.Type.IsVariadic())
+ call.PtrList().Set(paramNnames(tfn.Type()))
+ call.SetIsDDD(tfn.Type().IsVariadic())
if t0.NumResults() != 0 {
n := ir.Nod(ir.ORETURN, nil, nil)
- n.List.Set1(call)
+ n.PtrList().Set1(call)
call = n
}
body = append(body, call)
- dcl.Nbody.Set(body)
+ dcl.PtrBody().Set(body)
funcbody()
dcl = typecheck(dcl, ctxStmt)
// Need to typecheck the body of the just-generated wrapper.
// typecheckslice() requires that Curfn is set when processing an ORETURN.
Curfn = dcl
- typecheckslice(dcl.Nbody.Slice(), ctxStmt)
+ typecheckslice(dcl.Body().Slice(), ctxStmt)
sym.Def = ir.AsTypesNode(dcl)
xtop = append(xtop, dcl)
Curfn = savecurfn
func partialCallType(n *ir.Node) *types.Type {
t := tostruct([]*ir.Node{
namedfield("F", types.Types[types.TUINTPTR]),
- namedfield("R", n.Left.Type),
+ namedfield("R", n.Left().Type()),
})
t.SetNoalg(true)
return t
//
// Like walkclosure above.
- if n.Left.Type.IsInterface() {
+ if n.Left().Type().IsInterface() {
// Trigger panic for method on nil interface now.
// Otherwise it happens in the wrapper and is confusing.
- n.Left = cheapexpr(n.Left, init)
- n.Left = walkexpr(n.Left, nil)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ n.SetLeft(walkexpr(n.Left(), nil))
- tab := ir.Nod(ir.OITAB, n.Left, nil)
+ tab := ir.Nod(ir.OITAB, n.Left(), nil)
tab = typecheck(tab, ctxExpr)
c := ir.Nod(ir.OCHECKNIL, tab, nil)
typ := partialCallType(n)
clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ))
- clos.Esc = n.Esc
- clos.List.Set2(ir.Nod(ir.OCFUNC, n.Func.Nname, nil), n.Left)
+ clos.SetEsc(n.Esc())
+ clos.PtrList().Set2(ir.Nod(ir.OCFUNC, n.Func().Nname, nil), n.Left())
clos = ir.Nod(ir.OADDR, clos, nil)
- clos.Esc = n.Esc
+ clos.SetEsc(n.Esc())
// Force type conversion from *struct to the func type.
- clos = convnop(clos, n.Type)
+ clos = convnop(clos, n.Type())
// non-escaping temp to use, if any.
if x := prealloc[n]; x != nil {
- if !types.Identical(typ, x.Type) {
+ if !types.Identical(typ, x.Type()) {
panic("partial call type does not match order's assigned type")
}
- clos.Left.Right = x
+ clos.Left().SetRight(x)
delete(prealloc, n)
}
// callpartMethod returns the *types.Field representing the method
// referenced by method value n.
func callpartMethod(n *ir.Node) *types.Field {
- if n.Op != ir.OCALLPART {
+ if n.Op() != ir.OCALLPART {
base.Fatalf("expected OCALLPART, got %v", n)
}
// TODO(mdempsky): Optimize this. If necessary,
// makepartialcall could save m for us somewhere.
var m *types.Field
- if lookdot0(n.Right.Sym, n.Left.Type, &m, false) != 1 {
+ if lookdot0(n.Right().Sym(), n.Left().Type(), &m, false) != 1 {
base.Fatalf("failed to find field for OCALLPART")
}
base.Fatalf("bad conversion to untyped: %v", t)
}
- if n == nil || n.Type == nil {
+ if n == nil || n.Type() == nil {
// Allow sloppy callers.
return n
}
- if !n.Type.IsUntyped() {
+ if !n.Type().IsUntyped() {
// Already typed; nothing to do.
return n
}
- if n.Op == ir.OLITERAL || n.Op == ir.ONIL {
+ if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
// Can't always set n.Type directly on OLITERAL nodes.
// See discussion on CL 20813.
n = n.RawCopy()
}
// Nil is technically not a constant, so handle it specially.
- if n.Type.Etype == types.TNIL {
- if n.Op != ir.ONIL {
- base.Fatalf("unexpected op: %v (%v)", n, n.Op)
+ if n.Type().Etype == types.TNIL {
+ if n.Op() != ir.ONIL {
+ base.Fatalf("unexpected op: %v (%v)", n, n.Op())
}
if t == nil {
base.Errorf("use of untyped nil")
n.SetDiag(true)
- n.Type = nil
+ n.SetType(nil)
return n
}
return n
}
- n.Type = t
+ n.SetType(t)
return n
}
if t == nil || !ir.OKForConst[t.Etype] {
- t = defaultType(n.Type)
+ t = defaultType(n.Type())
}
- switch n.Op {
+ switch n.Op() {
default:
base.Fatalf("unexpected untyped expression: %v", n)
if v.Kind() == constant.Unknown {
break
}
- n.Type = t
+ n.SetType(t)
n.SetVal(v)
return n
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.OREAL, ir.OIMAG:
- ot := operandType(n.Op, t)
+ ot := operandType(n.Op(), t)
if ot == nil {
n = defaultlit(n, nil)
break
}
- n.Left = convlit(n.Left, ot)
- if n.Left.Type == nil {
- n.Type = nil
+ n.SetLeft(convlit(n.Left(), ot))
+ if n.Left().Type() == nil {
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
return n
case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND, ir.OCOMPLEX:
- ot := operandType(n.Op, t)
+ ot := operandType(n.Op(), t)
if ot == nil {
n = defaultlit(n, nil)
break
}
- n.Left = convlit(n.Left, ot)
- n.Right = convlit(n.Right, ot)
- if n.Left.Type == nil || n.Right.Type == nil {
- n.Type = nil
+ n.SetLeft(convlit(n.Left(), ot))
+ n.SetRight(convlit(n.Right(), ot))
+ if n.Left().Type() == nil || n.Right().Type() == nil {
+ n.SetType(nil)
return n
}
- if !types.Identical(n.Left.Type, n.Right.Type) {
- base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, n.Left.Type, n.Right.Type)
- n.Type = nil
+ if !types.Identical(n.Left().Type(), n.Right().Type()) {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, n.Left().Type(), n.Right().Type())
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
return n
case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
if !t.IsBoolean() {
break
}
- n.Type = t
+ n.SetType(t)
return n
case ir.OLSH, ir.ORSH:
- n.Left = convlit1(n.Left, t, explicit, nil)
- n.Type = n.Left.Type
- if n.Type != nil && !n.Type.IsInteger() {
- base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type)
- n.Type = nil
+ n.SetLeft(convlit1(n.Left(), t, explicit, nil))
+ n.SetType(n.Left().Type())
+ if n.Type() != nil && !n.Type().IsInteger() {
+ base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type())
+ n.SetType(nil)
}
return n
}
}
n.SetDiag(true)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
// Otherwise, evalConst returns a new OLITERAL with the same value as n,
// and with .Orig pointing back to n.
func evalConst(n *ir.Node) *ir.Node {
- nl, nr := n.Left, n.Right
+ nl, nr := n.Left(), n.Right()
// Pick off just the opcodes that can be constant evaluated.
- switch op := n.Op; op {
+ switch op := n.Op(); op {
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
- if nl.Op == ir.OLITERAL {
+ if nl.Op() == ir.OLITERAL {
var prec uint
- if n.Type.IsUnsigned() {
- prec = uint(n.Type.Size() * 8)
+ if n.Type().IsUnsigned() {
+ prec = uint(n.Type().Size() * 8)
}
return origConst(n, constant.UnaryOp(tokenForOp[op], nl.Val(), prec))
}
case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND:
- if nl.Op == ir.OLITERAL && nr.Op == ir.OLITERAL {
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
rval := nr.Val()
// check for divisor underflow in complex division (see issue 20227)
- if op == ir.ODIV && n.Type.IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 {
+ if op == ir.ODIV && n.Type().IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 {
base.Errorf("complex division by zero")
- n.Type = nil
+ n.SetType(nil)
return n
}
if (op == ir.ODIV || op == ir.OMOD) && constant.Sign(rval) == 0 {
base.Errorf("division by zero")
- n.Type = nil
+ n.SetType(nil)
return n
}
tok := tokenForOp[op]
- if op == ir.ODIV && n.Type.IsInteger() {
+ if op == ir.ODIV && n.Type().IsInteger() {
tok = token.QUO_ASSIGN // integer division
}
return origConst(n, constant.BinaryOp(nl.Val(), tok, rval))
}
case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
- if nl.Op == ir.OLITERAL && nr.Op == ir.OLITERAL {
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
return origBoolConst(n, constant.Compare(nl.Val(), tokenForOp[op], nr.Val()))
}
case ir.OLSH, ir.ORSH:
- if nl.Op == ir.OLITERAL && nr.Op == ir.OLITERAL {
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
// shiftBound from go/types; "so we can express smallestFloat64"
const shiftBound = 1023 - 1 + 52
s, ok := constant.Uint64Val(nr.Val())
if !ok || s > shiftBound {
base.Errorf("invalid shift count %v", nr)
- n.Type = nil
+ n.SetType(nil)
break
}
return origConst(n, constant.Shift(toint(nl.Val()), tokenForOp[op], uint(s)))
}
case ir.OCONV, ir.ORUNESTR:
- if ir.OKForConst[n.Type.Etype] && nl.Op == ir.OLITERAL {
- return origConst(n, convertVal(nl.Val(), n.Type, true))
+ if ir.OKForConst[n.Type().Etype] && nl.Op() == ir.OLITERAL {
+ return origConst(n, convertVal(nl.Val(), n.Type(), true))
}
case ir.OCONVNOP:
- if ir.OKForConst[n.Type.Etype] && nl.Op == ir.OLITERAL {
+ if ir.OKForConst[n.Type().Etype] && nl.Op() == ir.OLITERAL {
// set so n.Orig gets OCONV instead of OCONVNOP
- n.Op = ir.OCONV
+ n.SetOp(ir.OCONV)
return origConst(n, nl.Val())
}
case ir.OADDSTR:
// Merge adjacent constants in the argument list.
- s := n.List.Slice()
+ s := n.List().Slice()
need := 0
for i := 0; i < len(s); i++ {
if i == 0 || !ir.IsConst(s[i-1], constant.String) || !ir.IsConst(s[i], constant.String) {
}
nl := origConst(s[i], constant.MakeString(strings.Join(strs, "")))
- nl.Orig = nl // it's bigger than just s[i]
+ nl.SetOrig(nl) // it's bigger than just s[i]
newList = append(newList, nl)
i = i2 - 1
} else {
}
n = ir.Copy(n)
- n.List.Set(newList)
+ n.PtrList().Set(newList)
return n
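// E.g. (a sketch) "a" + "b" + c + "d" + "e" becomes "ab" + c + "de":
// runs of adjacent constant operands are folded into one constant,
// and non-constant operands break the runs.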
case ir.OCAP, ir.OLEN:
- switch nl.Type.Etype {
+ switch nl.Type().Etype {
case types.TSTRING:
if ir.IsConst(nl, constant.String) {
return origIntConst(n, int64(len(nl.StringVal())))
}
case types.TARRAY:
if !hascallchan(nl) {
- return origIntConst(n, nl.Type.NumElem())
+ return origIntConst(n, nl.Type().NumElem())
}
}
return origIntConst(n, evalunsafe(n))
case ir.OREAL:
- if nl.Op == ir.OLITERAL {
+ if nl.Op() == ir.OLITERAL {
return origConst(n, constant.Real(nl.Val()))
}
case ir.OIMAG:
- if nl.Op == ir.OLITERAL {
+ if nl.Op() == ir.OLITERAL {
return origConst(n, constant.Imag(nl.Val()))
}
case ir.OCOMPLEX:
- if nl.Op == ir.OLITERAL && nr.Op == ir.OLITERAL {
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
return origConst(n, makeComplex(nl.Val(), nr.Val()))
}
}
// origConst returns an OLITERAL with orig n and value v.
func origConst(n *ir.Node, v constant.Value) *ir.Node {
lno := setlineno(n)
- v = convertVal(v, n.Type, false)
+ v = convertVal(v, n.Type(), false)
base.Pos = lno
switch v.Kind() {
}
fallthrough
case constant.Unknown:
- what := overflowNames[n.Op]
+ what := overflowNames[n.Op()]
if what == "" {
- base.Fatalf("unexpected overflow: %v", n.Op)
+ base.Fatalf("unexpected overflow: %v", n.Op())
}
- base.ErrorfAt(n.Pos, "constant %v overflow", what)
- n.Type = nil
+ base.ErrorfAt(n.Pos(), "constant %v overflow", what)
+ n.SetType(nil)
return n
}
orig := n
- n = ir.NodAt(orig.Pos, ir.OLITERAL, nil, nil)
- n.Orig = orig
- n.Type = orig.Type
+ n = ir.NodAt(orig.Pos(), ir.OLITERAL, nil, nil)
+ n.SetOrig(orig)
+ n.SetType(orig.Type())
n.SetVal(v)
return n
}
// The results of defaultlit2 MUST be assigned back to l and r, e.g.
// n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
func defaultlit2(l *ir.Node, r *ir.Node, force bool) (*ir.Node, *ir.Node) {
- if l.Type == nil || r.Type == nil {
+ if l.Type() == nil || r.Type() == nil {
return l, r
}
- if !l.Type.IsUntyped() {
- r = convlit(r, l.Type)
+ if !l.Type().IsUntyped() {
+ r = convlit(r, l.Type())
return l, r
}
- if !r.Type.IsUntyped() {
- l = convlit(l, r.Type)
+ if !r.Type().IsUntyped() {
+ l = convlit(l, r.Type())
return l, r
}
}
// Can't mix bool with non-bool, string with non-string, or nil with anything (untyped).
- if l.Type.IsBoolean() != r.Type.IsBoolean() {
+ if l.Type().IsBoolean() != r.Type().IsBoolean() {
return l, r
}
- if l.Type.IsString() != r.Type.IsString() {
+ if l.Type().IsString() != r.Type().IsString() {
return l, r
}
if ir.IsNil(l) || ir.IsNil(r) {
return l, r
}
- t := defaultType(mixUntyped(l.Type, r.Type))
+ t := defaultType(mixUntyped(l.Type(), r.Type()))
l = convlit(l, t)
r = convlit(r, t)
return l, r
}
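// Examples, as a sketch: with l an untyped constant and r an int64
// variable, defaultlit2 converts l to int64; with both operands
// untyped (say an untyped int and an untyped float) and force set,
// both are converted to float64, the default type of their mixed
// untyped kind.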
func smallintconst(n *ir.Node) bool {
- if n.Op == ir.OLITERAL {
+ if n.Op() == ir.OLITERAL {
v, ok := constant.Int64Val(n.Val())
return ok && int64(int32(v)) == v
}
// integer, or negative, it returns -1. If n is too large, it
// returns -2.
func indexconst(n *ir.Node) int64 {
- if n.Op != ir.OLITERAL {
+ if n.Op() != ir.OLITERAL {
return -1
}
- if !n.Type.IsInteger() && n.Type.Etype != types.TIDEAL {
+ if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL {
return -1
}
// Expressions derived from nil, like string([]byte(nil)), while they
// may be known at compile time, are not Go language constants.
func isGoConst(n *ir.Node) bool {
- return n.Op == ir.OLITERAL
+ return n.Op() == ir.OLITERAL
}
func hascallchan(n *ir.Node) bool {
if n == nil {
return false
}
- switch n.Op {
+ switch n.Op() {
case ir.OAPPEND,
ir.OCALL,
ir.OCALLFUNC,
return true
}
- if hascallchan(n.Left) || hascallchan(n.Right) {
+ if hascallchan(n.Left()) || hascallchan(n.Right()) {
return true
}
- for _, n1 := range n.List.Slice() {
+ for _, n1 := range n.List().Slice() {
if hascallchan(n1) {
return true
}
}
- for _, n2 := range n.Rlist.Slice() {
+ for _, n2 := range n.Rlist().Slice() {
if hascallchan(n2) {
return true
}
//
// n must not be an untyped constant.
func (s *constSet) add(pos src.XPos, n *ir.Node, what, where string) {
- if n.Op == ir.OCONVIFACE && n.Implicit() {
- n = n.Left
+ if n.Op() == ir.OCONVIFACE && n.Implicit() {
+ n = n.Left()
}
if !isGoConst(n) {
return
}
- if n.Type.IsUntyped() {
+ if n.Type().IsUntyped() {
base.Fatalf("%v is untyped", n)
}
// #21866 by treating all type aliases like byte/uint8 and
// rune/int32.
- typ := n.Type
+ typ := n.Type()
switch typ {
case types.Bytetype:
typ = types.Types[types.TUINT8]
k := constSetKey{typ, ir.ConstValue(n)}
if hasUniquePos(n) {
- pos = n.Pos
+ pos = n.Pos()
}
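// Sketch: this normalization is what lets duplicate-case checking
// flag `case byte('a')` and `case uint8('a')` in the same switch as
// duplicates (#21866), since both reduce to the same TUINT8 key.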
if s.m == nil {
return
}
- if n.Name == nil {
+ if n.Name() == nil {
// named OLITERAL needs Name; most OLITERALs don't.
- n.Name = new(ir.Name)
+ n.SetName(new(ir.Name))
}
- s := n.Sym
+ s := n.Sym()
// kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
if !inimport && !typecheckok && s.Pkg != ir.LocalPkg {
- base.ErrorfAt(n.Pos, "cannot declare name %v", s)
+ base.ErrorfAt(n.Pos(), "cannot declare name %v", s)
}
gen := 0
if ctxt == ir.PEXTERN {
if s.Name == "init" {
- base.ErrorfAt(n.Pos, "cannot declare init - must be func")
+ base.ErrorfAt(n.Pos(), "cannot declare init - must be func")
}
if s.Name == "main" && s.Pkg.Name == "main" {
- base.ErrorfAt(n.Pos, "cannot declare main - must be func")
+ base.ErrorfAt(n.Pos(), "cannot declare main - must be func")
}
externdcl = append(externdcl, n)
} else {
if Curfn == nil && ctxt == ir.PAUTO {
- base.Pos = n.Pos
+ base.Pos = n.Pos()
base.Fatalf("automatic outside function")
}
if Curfn != nil && ctxt != ir.PFUNC {
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
}
- if n.Op == ir.OTYPE {
+ if n.Op() == ir.OTYPE {
declare_typegen++
gen = declare_typegen
- } else if n.Op == ir.ONAME && ctxt == ir.PAUTO && !strings.Contains(s.Name, "·") {
+ } else if n.Op() == ir.ONAME && ctxt == ir.PAUTO && !strings.Contains(s.Name, "·") {
vargen++
gen = vargen
}
types.Pushdcl(s)
- n.Name.Curfn = Curfn
+ n.Name().Curfn = Curfn
}
if ctxt == ir.PAUTO {
- n.Xoffset = 0
+ n.SetOffset(0)
}
if s.Block == types.Block {
// functype will print errors about duplicate function arguments.
// Don't repeat the error here.
if ctxt != ir.PPARAM && ctxt != ir.PPARAMOUT {
- redeclare(n.Pos, s, "in this block")
+ redeclare(n.Pos(), s, "in this block")
}
}
s.Block = types.Block
s.Lastlineno = base.Pos
s.Def = ir.AsTypesNode(n)
- n.Name.Vargen = int32(gen)
+ n.Name().Vargen = int32(gen)
n.SetClass(ctxt)
if ctxt == ir.PFUNC {
- n.Sym.SetFunc(true)
+ n.Sym().SetFunc(true)
}
autoexport(n, ctxt)
}
func addvar(n *ir.Node, t *types.Type, ctxt ir.Class) {
- if n == nil || n.Sym == nil || (n.Op != ir.ONAME && n.Op != ir.ONONAME) || t == nil {
+ if n == nil || n.Sym() == nil || (n.Op() != ir.ONAME && n.Op() != ir.ONONAME) || t == nil {
base.Fatalf("addvar: n=%v t=%v nil", n, t)
}
- n.Op = ir.ONAME
+ n.SetOp(ir.ONAME)
declare(n, ctxt)
- n.Type = t
+ n.SetType(t)
}
// declare variables from grammar
if len(el) == 1 && len(vl) > 1 {
e := el[0]
as2 := ir.Nod(ir.OAS2, nil, nil)
- as2.List.Set(vl)
- as2.Rlist.Set1(e)
+ as2.PtrList().Set(vl)
+ as2.PtrRlist().Set1(e)
for _, v := range vl {
- v.Op = ir.ONAME
+ v.SetOp(ir.ONAME)
declare(v, dclcontext)
- v.Name.Param.Ntype = t
- v.Name.Defn = as2
+ v.Name().Param.Ntype = t
+ v.Name().Defn = as2
if Curfn != nil {
init = append(init, ir.Nod(ir.ODCL, v, nil))
}
el = el[1:]
}
- v.Op = ir.ONAME
+ v.SetOp(ir.ONAME)
declare(v, dclcontext)
- v.Name.Param.Ntype = t
+ v.Name().Param.Ntype = t
if e != nil || Curfn != nil || ir.IsBlank(v) {
if Curfn != nil {
}
e = ir.Nod(ir.OAS, v, e)
init = append(init, e)
- if e.Right != nil {
- v.Name.Defn = e
+ if e.Right() != nil {
+ v.Name().Defn = e
}
}
}
base.Fatalf("newnoname nil")
}
n := ir.Nod(ir.ONONAME, nil, nil)
- n.Sym = s
- n.Xoffset = 0
+ n.SetSym(s)
+ n.SetOffset(0)
return n
}
base.Fatalf("newfuncnamel - already have name")
}
n := ir.NewNameAt(pos, s)
- n.Func = fn
+ n.SetFunc(fn)
fn.Nname = n
return n
}
// being declared.
func dclname(s *types.Sym) *ir.Node {
n := NewName(s)
- n.Op = ir.ONONAME // caller will correct it
+ n.SetOp(ir.ONONAME) // caller will correct it
return n
}
// if we copied another type with *t = *u
// then t->nod might be out of date, so
// check t->nod->type too
- if ir.AsNode(t.Nod) == nil || ir.AsNode(t.Nod).Type != t {
+ if ir.AsNode(t.Nod) == nil || ir.AsNode(t.Nod).Type() != t {
t.Nod = ir.AsTypesNode(ir.NodAt(pos, ir.OTYPE, nil, nil))
- ir.AsNode(t.Nod).Type = t
- ir.AsNode(t.Nod).Sym = t.Sym
+ ir.AsNode(t.Nod).SetType(t)
+ ir.AsNode(t.Nod).SetSym(t.Sym)
}
return ir.AsNode(t.Nod)
func symfield(s *types.Sym, typ *types.Type) *ir.Node {
n := nodSym(ir.ODCLFIELD, nil, s)
- n.Type = typ
+ n.SetType(typ)
return n
}
return newnoname(s)
}
- if Curfn != nil && n.Op == ir.ONAME && n.Name.Curfn != nil && n.Name.Curfn != Curfn {
+ if Curfn != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != Curfn {
// Inner func is referring to var in outer func.
//
// TODO(rsc): If there is an outer variable x and we
// are parsing x := 5 inside the closure, until we get to
// the := it looks like a reference to the outer x so we'll
// make x a closure variable unnecessarily.
- c := n.Name.Param.Innermost
- if c == nil || c.Name.Curfn != Curfn {
+ c := n.Name().Param.Innermost
+ if c == nil || c.Name().Curfn != Curfn {
// Do not have a closure var for the active closure yet; make one.
c = NewName(s)
c.SetClass(ir.PAUTOHEAP)
- c.Name.SetIsClosureVar(true)
+ c.Name().SetIsClosureVar(true)
c.SetIsDDD(n.IsDDD())
- c.Name.Defn = n
+ c.Name().Defn = n
// Link into list of active closure variables.
// Popped from list in func funcLit.
- c.Name.Param.Outer = n.Name.Param.Innermost
- n.Name.Param.Innermost = c
+ c.Name().Param.Outer = n.Name().Param.Innermost
+ n.Name().Param.Innermost = c
- Curfn.Func.ClosureVars.Append(c)
+ Curfn.Func().ClosureVars.Append(c)
}
// return ref to closure var, not original
// := declarations
func colasname(n *ir.Node) bool {
- switch n.Op {
+ switch n.Op() {
case ir.ONAME,
ir.ONONAME,
ir.OPACK,
ir.OTYPE,
ir.OLITERAL:
- return n.Sym != nil
+ return n.Sym() != nil
}
return false
func colasdefn(left []*ir.Node, defn *ir.Node) {
for _, n := range left {
- if n.Sym != nil {
- n.Sym.SetUniq(true)
+ if n.Sym() != nil {
+ n.Sym().SetUniq(true)
}
}
continue
}
if !colasname(n) {
- base.ErrorfAt(defn.Pos, "non-name %v on left side of :=", n)
+ base.ErrorfAt(defn.Pos(), "non-name %v on left side of :=", n)
nerr++
continue
}
- if !n.Sym.Uniq() {
- base.ErrorfAt(defn.Pos, "%v repeated on left side of :=", n.Sym)
+ if !n.Sym().Uniq() {
+ base.ErrorfAt(defn.Pos(), "%v repeated on left side of :=", n.Sym())
n.SetDiag(true)
nerr++
continue
}
- n.Sym.SetUniq(false)
- if n.Sym.Block == types.Block {
+ n.Sym().SetUniq(false)
+ if n.Sym().Block == types.Block {
continue
}
nnew++
- n = NewName(n.Sym)
+ n = NewName(n.Sym())
declare(n, dclcontext)
- n.Name.Defn = defn
- defn.Ninit.Append(ir.Nod(ir.ODCL, n, nil))
+ n.Name().Defn = defn
+ defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
left[i] = n
}
if nnew == 0 && nerr == 0 {
- base.ErrorfAt(defn.Pos, "no new variables on left side of :=")
+ base.ErrorfAt(defn.Pos(), "no new variables on left side of :=")
}
}
// declare the arguments in an
// interface field declaration.
func ifacedcl(n *ir.Node) {
- if n.Op != ir.ODCLFIELD || n.Left == nil {
+ if n.Op() != ir.ODCLFIELD || n.Left() == nil {
base.Fatalf("ifacedcl")
}
- if n.Sym.IsBlank() {
+ if n.Sym().IsBlank() {
base.Errorf("methods must have a unique non-blank name")
}
}
types.Markdcl()
- if n.Func.Nname != nil && n.Func.Nname.Name.Param.Ntype != nil {
- funcargs(n.Func.Nname.Name.Param.Ntype)
+ if n.Func().Nname != nil && n.Func().Nname.Name().Param.Ntype != nil {
+ funcargs(n.Func().Nname.Name().Param.Ntype)
} else {
- funcargs2(n.Type)
+ funcargs2(n.Type())
}
}
func funcargs(nt *ir.Node) {
- if nt.Op != ir.OTFUNC {
- base.Fatalf("funcargs %v", nt.Op)
+ if nt.Op() != ir.OTFUNC {
+ base.Fatalf("funcargs %v", nt.Op())
}
// re-start the variable generation number
// TODO(mdempsky): This is ugly, and only necessary because
// esc.go uses Vargen to figure out result parameters' index
// within the result tuple.
- vargen = nt.Rlist.Len()
+ vargen = nt.Rlist().Len()
// declare the receiver and in arguments.
- if nt.Left != nil {
- funcarg(nt.Left, ir.PPARAM)
+ if nt.Left() != nil {
+ funcarg(nt.Left(), ir.PPARAM)
}
- for _, n := range nt.List.Slice() {
+ for _, n := range nt.List().Slice() {
funcarg(n, ir.PPARAM)
}
vargen = 0
// declare the out arguments.
- gen := nt.List.Len()
- for _, n := range nt.Rlist.Slice() {
- if n.Sym == nil {
+ gen := nt.List().Len()
+ for _, n := range nt.Rlist().Slice() {
+ if n.Sym() == nil {
// Name so that escape analysis can track it. ~r stands for 'result'.
- n.Sym = lookupN("~r", gen)
+ n.SetSym(lookupN("~r", gen))
gen++
}
- if n.Sym.IsBlank() {
+ if n.Sym().IsBlank() {
// Give it a name so we can assign to it during return. ~b stands for 'blank'.
// The name must be different from ~r above because if you have
// func f() (_ int)
// func g() int
// f is allowed to use a plain 'return' with no arguments, while g is not.
// So the two cases must be distinguished.
- n.Sym = lookupN("~b", gen)
+ n.SetSym(lookupN("~b", gen))
gen++
}
}
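// Sketch: for `func f() (int, _ int)` the two results become ~r0 and
// ~b1 (the counter runs across both), so escape analysis can track
// the unnamed result while a plain `return` can still assign the
// blank one.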
func funcarg(n *ir.Node, ctxt ir.Class) {
- if n.Op != ir.ODCLFIELD {
- base.Fatalf("funcarg %v", n.Op)
+ if n.Op() != ir.ODCLFIELD {
+ base.Fatalf("funcarg %v", n.Op())
}
- if n.Sym == nil {
+ if n.Sym() == nil {
return
}
- n.Right = ir.NewNameAt(n.Pos, n.Sym)
- n.Right.Name.Param.Ntype = n.Left
- n.Right.SetIsDDD(n.IsDDD())
- declare(n.Right, ctxt)
+ n.SetRight(ir.NewNameAt(n.Pos(), n.Sym()))
+ n.Right().Name().Param.Ntype = n.Left()
+ n.Right().SetIsDDD(n.IsDDD())
+ declare(n.Right(), ctxt)
vargen++
- n.Right.Name.Vargen = int32(vargen)
+ n.Right().Name().Vargen = int32(vargen)
}
// Same as funcargs, except run over an already constructed TFUNC.
}
n := ir.NewNameAt(f.Pos, f.Sym)
f.Nname = ir.AsTypesNode(n)
- n.Type = f.Type
+ n.SetType(f.Type)
n.SetIsDDD(f.IsDDD())
declare(n, ctxt)
}
func structfield(n *ir.Node) *types.Field {
lno := base.Pos
- base.Pos = n.Pos
+ base.Pos = n.Pos()
- if n.Op != ir.ODCLFIELD {
+ if n.Op() != ir.ODCLFIELD {
base.Fatalf("structfield: oops %v\n", n)
}
- if n.Left != nil {
- n.Left = typecheck(n.Left, ctxType)
- n.Type = n.Left.Type
- n.Left = nil
+ if n.Left() != nil {
+ n.SetLeft(typecheck(n.Left(), ctxType))
+ n.SetType(n.Left().Type())
+ n.SetLeft(nil)
}
- f := types.NewField(n.Pos, n.Sym, n.Type)
+ f := types.NewField(n.Pos(), n.Sym(), n.Type())
if n.Embedded() {
- checkembeddedtype(n.Type)
+ checkembeddedtype(n.Type())
f.Embedded = 1
}
if n.HasVal() {
for i, n := range l {
f := structfield(n)
f.SetIsDDD(n.IsDDD())
- if n.Right != nil {
- n.Right.Type = f.Type
- f.Nname = ir.AsTypesNode(n.Right)
+ if n.Right() != nil {
+ n.Right().SetType(f.Type)
+ f.Nname = ir.AsTypesNode(n.Right())
}
if f.Broke() {
t.SetBroke(true)
func interfacefield(n *ir.Node) *types.Field {
lno := base.Pos
- base.Pos = n.Pos
+ base.Pos = n.Pos()
- if n.Op != ir.ODCLFIELD {
+ if n.Op() != ir.ODCLFIELD {
base.Fatalf("interfacefield: oops %v\n", n)
}
// If Sym != nil, then Sym is MethodName and Left is Signature.
// Otherwise, Left is InterfaceTypeName.
- if n.Left != nil {
- n.Left = typecheck(n.Left, ctxType)
- n.Type = n.Left.Type
- n.Left = nil
+ if n.Left() != nil {
+ n.SetLeft(typecheck(n.Left(), ctxType))
+ n.SetType(n.Left().Type())
+ n.SetLeft(nil)
}
- f := types.NewField(n.Pos, n.Sym, n.Type)
+ f := types.NewField(n.Pos(), n.Sym(), n.Type())
base.Pos = lno
return f
}
f := types.NewField(base.Pos, msym, t)
- f.Nname = ir.AsTypesNode(n.Func.Nname)
+ f.Nname = ir.AsTypesNode(n.Func().Nname)
f.SetNointerface(nointerface)
mt.Methods().Append(f)
// setNodeNameFunc marks a node as a function.
func setNodeNameFunc(n *ir.Node) {
- if n.Op != ir.ONAME || n.Class() != ir.Pxxx {
+ if n.Op() != ir.ONAME || n.Class() != ir.Pxxx {
base.Fatalf("expected ONAME/Pxxx node, got %v", n)
}
n.SetClass(ir.PFUNC)
- n.Sym.SetFunc(true)
+ n.Sym().SetFunc(true)
}
func dclfunc(sym *types.Sym, tfn *ir.Node) *ir.Node {
- if tfn.Op != ir.OTFUNC {
+ if tfn.Op() != ir.OTFUNC {
base.Fatalf("expected OTFUNC node, got %v", tfn)
}
fn := ir.Nod(ir.ODCLFUNC, nil, nil)
- fn.Func.Nname = newfuncnamel(base.Pos, sym, fn.Func)
- fn.Func.Nname.Name.Defn = fn
- fn.Func.Nname.Name.Param.Ntype = tfn
- setNodeNameFunc(fn.Func.Nname)
+ fn.Func().Nname = newfuncnamel(base.Pos, sym, fn.Func())
+ fn.Func().Nname.Name().Defn = fn
+ fn.Func().Nname.Name().Param.Ntype = tfn
+ setNodeNameFunc(fn.Func().Nname)
funchdr(fn)
- fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, ctxType)
+ fn.Func().Nname.Name().Param.Ntype = typecheck(fn.Func().Nname.Name().Param.Ntype, ctxType)
return fn
}
// directly. This has to happen before transformclosure since
// it's a lot harder to work out the argument after.
for _, n := range xtop {
- if n.Op != ir.ODCLFUNC {
+ if n.Op() != ir.ODCLFUNC {
continue
}
c.curfn = n
}
func (c *nowritebarrierrecChecker) findExtraCalls(n *ir.Node) bool {
- if n.Op != ir.OCALLFUNC {
+ if n.Op() != ir.OCALLFUNC {
return true
}
- fn := n.Left
- if fn == nil || fn.Op != ir.ONAME || fn.Class() != ir.PFUNC || fn.Name.Defn == nil {
+ fn := n.Left()
+ if fn == nil || fn.Op() != ir.ONAME || fn.Class() != ir.PFUNC || fn.Name().Defn == nil {
return true
}
- if !isRuntimePkg(fn.Sym.Pkg) || fn.Sym.Name != "systemstack" {
+ if !isRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" {
return true
}
var callee *ir.Node
- arg := n.List.First()
- switch arg.Op {
+ arg := n.List().First()
+ switch arg.Op() {
case ir.ONAME:
- callee = arg.Name.Defn
+ callee = arg.Name().Defn
case ir.OCLOSURE:
- callee = arg.Func.Decl
+ callee = arg.Func().Decl
default:
base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
}
- if callee.Op != ir.ODCLFUNC {
+ if callee.Op() != ir.ODCLFUNC {
base.Fatalf("expected ODCLFUNC node, got %+v", callee)
}
- c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos})
+ c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()})
return true
}
//
// This can be called concurrently for different from Nodes.
func (c *nowritebarrierrecChecker) recordCall(from *ir.Node, to *obj.LSym, pos src.XPos) {
- if from.Op != ir.ODCLFUNC {
+ if from.Op() != ir.ODCLFUNC {
base.Fatalf("expected ODCLFUNC, got %v", from)
}
// We record this information on the *Func so this is
// concurrent-safe.
- fn := from.Func
+ fn := from.Func()
if fn.NWBRCalls == nil {
fn.NWBRCalls = new([]ir.SymAndPos)
}
var q ir.NodeQueue
for _, n := range xtop {
- if n.Op != ir.ODCLFUNC {
+ if n.Op() != ir.ODCLFUNC {
continue
}
- symToFunc[n.Func.LSym] = n
+ symToFunc[n.Func().LSym] = n
// Make nowritebarrierrec functions BFS roots.
- if n.Func.Pragma&ir.Nowritebarrierrec != 0 {
+ if n.Func().Pragma&ir.Nowritebarrierrec != 0 {
funcs[n] = nowritebarrierrecCall{}
q.PushRight(n)
}
// Check go:nowritebarrier functions.
- if n.Func.Pragma&ir.Nowritebarrier != 0 && n.Func.WBPos.IsKnown() {
- base.ErrorfAt(n.Func.WBPos, "write barrier prohibited")
+ if n.Func().Pragma&ir.Nowritebarrier != 0 && n.Func().WBPos.IsKnown() {
+ base.ErrorfAt(n.Func().WBPos, "write barrier prohibited")
}
}
// Perform a BFS of the call graph from all
// go:nowritebarrierrec functions.
enqueue := func(src, target *ir.Node, pos src.XPos) {
- if target.Func.Pragma&ir.Yeswritebarrierrec != 0 {
+ if target.Func().Pragma&ir.Yeswritebarrierrec != 0 {
// Don't flow into this function.
return
}
fn := q.PopLeft()
// Check fn.
- if fn.Func.WBPos.IsKnown() {
+ if fn.Func().WBPos.IsKnown() {
var err bytes.Buffer
call := funcs[fn]
for call.target != nil {
- fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Func.Nname)
+ fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Func().Nname)
call = funcs[call.target]
}
- base.ErrorfAt(fn.Func.WBPos, "write barrier prohibited by caller; %v%s", fn.Func.Nname, err.String())
+ base.ErrorfAt(fn.Func().WBPos, "write barrier prohibited by caller; %v%s", fn.Func().Nname, err.String())
continue
}
for _, callee := range c.extraCalls[fn] {
enqueue(fn, callee.target, callee.lineno)
}
- if fn.Func.NWBRCalls == nil {
+ if fn.Func().NWBRCalls == nil {
continue
}
- for _, callee := range *fn.Func.NWBRCalls {
+ for _, callee := range *fn.Func().NWBRCalls {
target := symToFunc[callee.Sym]
if target != nil {
enqueue(fn, target, callee.Pos)
dcl := preInliningDcls(fnsym)
m := make(map[varPos]int)
for i, n := range dcl {
- pos := base.Ctxt.InnermostPos(n.Pos)
+ pos := base.Ctxt.InnermostPos(n.Pos())
vp := varPos{
- DeclName: unversion(n.Sym.Name),
+ DeclName: unversion(n.Sym().Name),
DeclFile: pos.RelFilename(),
DeclLine: pos.RelLine(),
DeclCol: pos.Col(),
}
if _, found := m[vp]; found {
- base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym.Name, fnsym.Name)
+ base.Fatalf("child dcl collision on symbol %s within %v\n", n.Sym().Name, fnsym.Name)
}
m[vp] = i
}
v := names[0]
if dclcontext != ir.PEXTERN {
numLocalEmbed++
- v = ir.NewNameAt(v.Pos, lookupN("embed.", numLocalEmbed))
- v.Sym.Def = ir.AsTypesNode(v)
- v.Name.Param.Ntype = typ
+ v = ir.NewNameAt(v.Pos(), lookupN("embed.", numLocalEmbed))
+ v.Sym().Def = ir.AsTypesNode(v)
+ v.Name().Param.Ntype = typ
v.SetClass(ir.PEXTERN)
externdcl = append(externdcl, v)
exprs = []*ir.Node{v}
}
- v.Name.Param.SetEmbedFiles(list)
+ v.Name().Param.SetEmbedFiles(list)
embedlist = append(embedlist, v)
return exprs
}
// can't tell whether "string" and "byte" really mean "string" and "byte".
// The result must be confirmed later, after type checking, using embedKind.
func embedKindApprox(typ *ir.Node) int {
- if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
+ if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
// These are not guaranteed to match only string and []byte -
// maybe the local package has redefined one of those words.
// But it's the best we can do now during the noder.
// The stricter check happens later, in initEmbed calling embedKind.
- if typ.Sym != nil && typ.Sym.Name == "string" && typ.Sym.Pkg == ir.LocalPkg {
+ if typ.Sym() != nil && typ.Sym().Name == "string" && typ.Sym().Pkg == ir.LocalPkg {
return embedString
}
- if typ.Op == ir.OTARRAY && typ.Left == nil && typ.Right.Sym != nil && typ.Right.Sym.Name == "byte" && typ.Right.Sym.Pkg == ir.LocalPkg {
+ if typ.Op() == ir.OTARRAY && typ.Left() == nil && typ.Right().Sym() != nil && typ.Right().Sym().Name == "byte" && typ.Right().Sym().Pkg == ir.LocalPkg {
return embedBytes
}
return embedUnknown
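// Sketch: `//go:embed hello.txt` above `var s string` is classified
// embedString, above `var b []byte` embedBytes, and above
// `var f embed.FS` embedFiles; anything else reports embedUnknown.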
// initEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
func initEmbed(v *ir.Node) {
- files := v.Name.Param.EmbedFiles()
- switch kind := embedKind(v.Type); kind {
+ files := v.Name().Param.EmbedFiles()
+ switch kind := embedKind(v.Type()); kind {
case embedUnknown:
- base.ErrorfAt(v.Pos, "go:embed cannot apply to var of type %v", v.Type)
+ base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
case embedString, embedBytes:
file := files[0]
- fsym, size, err := fileStringSym(v.Pos, base.Flag.Cfg.Embed.Files[file], kind == embedString, nil)
+ fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], kind == embedString, nil)
if err != nil {
- base.ErrorfAt(v.Pos, "embed %s: %v", file, err)
+ base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
}
- sym := v.Sym.Linksym()
+ sym := v.Sym().Linksym()
off := 0
off = dsymptr(sym, off, fsym, 0) // data string
off = duintptr(sym, off, uint64(size)) // len
}
case embedFiles:
- slicedata := base.Ctxt.Lookup(`"".` + v.Sym.Name + `.files`)
+ slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`)
off := 0
// []files pointed at by Files
off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
const hashSize = 16
hash := make([]byte, hashSize)
for _, file := range files {
- off = dsymptr(slicedata, off, stringsym(v.Pos, file), 0) // file string
+ off = dsymptr(slicedata, off, stringsym(v.Pos(), file), 0) // file string
off = duintptr(slicedata, off, uint64(len(file)))
if strings.HasSuffix(file, "/") {
// entry for directory - no data
off = duintptr(slicedata, off, 0)
off += hashSize
} else {
- fsym, size, err := fileStringSym(v.Pos, base.Flag.Cfg.Embed.Files[file], true, hash)
+ fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash)
if err != nil {
- base.ErrorfAt(v.Pos, "embed %s: %v", file, err)
+ base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
}
off = dsymptr(slicedata, off, fsym, 0) // data string
off = duintptr(slicedata, off, uint64(size))
}
}
ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
- sym := v.Sym.Linksym()
+ sym := v.Sym().Linksym()
dsymptr(sym, 0, slicedata, 0)
}
}
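// Rough shape of the data emitted above (illustrative, not normative):
// the variable's own symbol holds a single pointer to slicedata, and
// slicedata holds a slice header followed by one (name ptr, name len,
// data ptr or 0, data len, hash) record per embedded file.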
// escFmt is called from node printing to print information about escape analysis results.
func escFmt(n *ir.Node, short bool) string {
text := ""
- switch n.Esc {
+ switch n.Esc() {
case EscUnknown:
break
}
default:
- text = fmt.Sprintf("esc(%d)", n.Esc)
+ text = fmt.Sprintf("esc(%d)", n.Esc())
}
if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 {
// functions.
func escapeFuncs(fns []*ir.Node, recursive bool) {
for _, fn := range fns {
- if fn.Op != ir.ODCLFUNC {
+ if fn.Op() != ir.ODCLFUNC {
base.Fatalf("unexpected node: %v", fn)
}
}
}
func (e *Escape) initFunc(fn *ir.Node) {
- if fn.Op != ir.ODCLFUNC || fn.Esc != EscFuncUnknown {
+ if fn.Op() != ir.ODCLFUNC || fn.Esc() != EscFuncUnknown {
base.Fatalf("unexpected node: %v", fn)
}
- fn.Esc = EscFuncPlanned
+ fn.SetEsc(EscFuncPlanned)
if base.Flag.LowerM > 3 {
ir.Dump("escAnalyze", fn)
}
e.loopDepth = 1
// Allocate locations for local variables.
- for _, dcl := range fn.Func.Dcl {
- if dcl.Op == ir.ONAME {
+ for _, dcl := range fn.Func().Dcl {
+ if dcl.Op() == ir.ONAME {
e.newLoc(dcl, false)
}
}
}
func (e *Escape) walkFunc(fn *ir.Node) {
- fn.Esc = EscFuncStarted
+ fn.SetEsc(EscFuncStarted)
// Identify labels that mark the head of an unstructured loop.
- ir.InspectList(fn.Nbody, func(n *ir.Node) bool {
- switch n.Op {
+ ir.InspectList(fn.Body(), func(n *ir.Node) bool {
+ switch n.Op() {
case ir.OLABEL:
- n.Sym.Label = ir.AsTypesNode(nonlooping)
+ n.Sym().Label = ir.AsTypesNode(nonlooping)
case ir.OGOTO:
// If we visited the label before the goto,
// then this is a looping label.
- if n.Sym.Label == ir.AsTypesNode(nonlooping) {
- n.Sym.Label = ir.AsTypesNode(looping)
+ if n.Sym().Label == ir.AsTypesNode(nonlooping) {
+ n.Sym().Label = ir.AsTypesNode(looping)
}
}
e.curfn = fn
e.loopDepth = 1
- e.block(fn.Nbody)
+ e.block(fn.Body())
}
// Below we implement the methods for walking the AST and recording
fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, funcSym(e.curfn), n)
}
- e.stmts(n.Ninit)
+ e.stmts(n.Init())
- switch n.Op {
+ switch n.Op() {
default:
base.Fatalf("unexpected stmt: %v", n)
// TODO(mdempsky): Handle dead code?
case ir.OBLOCK:
- e.stmts(n.List)
+ e.stmts(n.List())
case ir.ODCL:
// Record loop depth at declaration.
- if !ir.IsBlank(n.Left) {
- e.dcl(n.Left)
+ if !ir.IsBlank(n.Left()) {
+ e.dcl(n.Left())
}
case ir.OLABEL:
- switch ir.AsNode(n.Sym.Label) {
+ switch ir.AsNode(n.Sym().Label) {
case nonlooping:
if base.Flag.LowerM > 2 {
fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
default:
base.Fatalf("label missing tag")
}
- n.Sym.Label = nil
+ n.Sym().Label = nil
case ir.OIF:
- e.discard(n.Left)
- e.block(n.Nbody)
- e.block(n.Rlist)
+ e.discard(n.Left())
+ e.block(n.Body())
+ e.block(n.Rlist())
case ir.OFOR, ir.OFORUNTIL:
e.loopDepth++
- e.discard(n.Left)
- e.stmt(n.Right)
- e.block(n.Nbody)
+ e.discard(n.Left())
+ e.stmt(n.Right())
+ e.block(n.Body())
e.loopDepth--
case ir.ORANGE:
// for List = range Right { Nbody }
e.loopDepth++
- ks := e.addrs(n.List)
- e.block(n.Nbody)
+ ks := e.addrs(n.List())
+ e.block(n.Body())
e.loopDepth--
// Right is evaluated outside the loop.
k := e.discardHole()
if len(ks) >= 2 {
- if n.Right.Type.IsArray() {
+ if n.Right().Type().IsArray() {
k = ks[1].note(n, "range")
} else {
k = ks[1].deref(n, "range-deref")
}
}
- e.expr(e.later(k), n.Right)
+ e.expr(e.later(k), n.Right())
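// E.g. (illustrative): in "for _, v := range a", the element values
// flow via a note on a itself when a is an array, but via a deref of
// the slice's backing store when a is a slice, matching ks[1] above.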
case ir.OSWITCH:
- typesw := n.Left != nil && n.Left.Op == ir.OTYPESW
+ typesw := n.Left() != nil && n.Left().Op() == ir.OTYPESW
var ks []EscHole
- for _, cas := range n.List.Slice() { // cases
- if typesw && n.Left.Left != nil {
- cv := cas.Rlist.First()
+ for _, cas := range n.List().Slice() { // cases
+ if typesw && n.Left().Left() != nil {
+ cv := cas.Rlist().First()
k := e.dcl(cv) // type switch variables have no ODCL.
- if cv.Type.HasPointers() {
- ks = append(ks, k.dotType(cv.Type, cas, "switch case"))
+ if cv.Type().HasPointers() {
+ ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
}
}
- e.discards(cas.List)
- e.block(cas.Nbody)
+ e.discards(cas.List())
+ e.block(cas.Body())
}
if typesw {
- e.expr(e.teeHole(ks...), n.Left.Right)
+ e.expr(e.teeHole(ks...), n.Left().Right())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
case ir.OSELECT:
- for _, cas := range n.List.Slice() {
- e.stmt(cas.Left)
- e.block(cas.Nbody)
+ for _, cas := range n.List().Slice() {
+ e.stmt(cas.Left())
+ e.block(cas.Body())
}
case ir.OSELRECV:
- e.assign(n.Left, n.Right, "selrecv", n)
+ e.assign(n.Left(), n.Right(), "selrecv", n)
case ir.OSELRECV2:
- e.assign(n.Left, n.Right, "selrecv", n)
- e.assign(n.List.First(), nil, "selrecv", n)
+ e.assign(n.Left(), n.Right(), "selrecv", n)
+ e.assign(n.List().First(), nil, "selrecv", n)
case ir.ORECV:
// TODO(mdempsky): Consider e.discard(n.Left).
e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
case ir.OSEND:
- e.discard(n.Left)
- e.assignHeap(n.Right, "send", n)
+ e.discard(n.Left())
+ e.assignHeap(n.Right(), "send", n)
case ir.OAS, ir.OASOP:
- e.assign(n.Left, n.Right, "assign", n)
+ e.assign(n.Left(), n.Right(), "assign", n)
case ir.OAS2:
- for i, nl := range n.List.Slice() {
- e.assign(nl, n.Rlist.Index(i), "assign-pair", n)
+ for i, nl := range n.List().Slice() {
+ e.assign(nl, n.Rlist().Index(i), "assign-pair", n)
}
case ir.OAS2DOTTYPE: // v, ok = x.(type)
- e.assign(n.List.First(), n.Right, "assign-pair-dot-type", n)
- e.assign(n.List.Second(), nil, "assign-pair-dot-type", n)
+ e.assign(n.List().First(), n.Right(), "assign-pair-dot-type", n)
+ e.assign(n.List().Second(), nil, "assign-pair-dot-type", n)
case ir.OAS2MAPR: // v, ok = m[k]
- e.assign(n.List.First(), n.Right, "assign-pair-mapr", n)
- e.assign(n.List.Second(), nil, "assign-pair-mapr", n)
+ e.assign(n.List().First(), n.Right(), "assign-pair-mapr", n)
+ e.assign(n.List().Second(), nil, "assign-pair-mapr", n)
case ir.OAS2RECV: // v, ok = <-ch
- e.assign(n.List.First(), n.Right, "assign-pair-receive", n)
- e.assign(n.List.Second(), nil, "assign-pair-receive", n)
+ e.assign(n.List().First(), n.Right(), "assign-pair-receive", n)
+ e.assign(n.List().Second(), nil, "assign-pair-receive", n)
case ir.OAS2FUNC:
- e.stmts(n.Right.Ninit)
- e.call(e.addrs(n.List), n.Right, nil)
+ e.stmts(n.Right().Init())
+ e.call(e.addrs(n.List()), n.Right(), nil)
case ir.ORETURN:
- results := e.curfn.Type.Results().FieldSlice()
- for i, v := range n.List.Slice() {
+ results := e.curfn.Type().Results().FieldSlice()
+ for i, v := range n.List().Slice() {
e.assign(ir.AsNode(results[i].Nname), v, "return", n)
}
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
e.call(nil, n, nil)
case ir.OGO, ir.ODEFER:
- e.stmts(n.Left.Ninit)
- e.call(nil, n.Left, n)
+ e.stmts(n.Left().Init())
+ e.call(nil, n.Left(), n)
case ir.ORETJMP:
// TODO(mdempsky): What do? esc.go just ignores it.
if n == nil {
return
}
- e.stmts(n.Ninit)
+ e.stmts(n.Init())
e.exprSkipInit(k, n)
}
uintptrEscapesHack := k.uintptrEscapesHack
k.uintptrEscapesHack = false
- if uintptrEscapesHack && n.Op == ir.OCONVNOP && n.Left.Type.IsUnsafePtr() {
+ if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.Left().Type().IsUnsafePtr() {
// nop
- } else if k.derefs >= 0 && !n.Type.HasPointers() {
+ } else if k.derefs >= 0 && !n.Type().HasPointers() {
k = e.discardHole()
}
- switch n.Op {
+ switch n.Op() {
default:
base.Fatalf("unexpected expr: %v", n)
e.flow(k, e.oldLoc(n))
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
- e.discard(n.Left)
+ e.discard(n.Left())
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE, ir.OANDAND, ir.OOROR:
- e.discard(n.Left)
- e.discard(n.Right)
+ e.discard(n.Left())
+ e.discard(n.Right())
case ir.OADDR:
- e.expr(k.addr(n, "address-of"), n.Left) // "address-of"
+ e.expr(k.addr(n, "address-of"), n.Left()) // "address-of"
case ir.ODEREF:
- e.expr(k.deref(n, "indirection"), n.Left) // "indirection"
+ e.expr(k.deref(n, "indirection"), n.Left()) // "indirection"
case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
- e.expr(k.note(n, "dot"), n.Left)
+ e.expr(k.note(n, "dot"), n.Left())
case ir.ODOTPTR:
- e.expr(k.deref(n, "dot of pointer"), n.Left) // "dot of pointer"
+ e.expr(k.deref(n, "dot of pointer"), n.Left()) // "dot of pointer"
case ir.ODOTTYPE, ir.ODOTTYPE2:
- e.expr(k.dotType(n.Type, n, "dot"), n.Left)
+ e.expr(k.dotType(n.Type(), n, "dot"), n.Left())
case ir.OINDEX:
- if n.Left.Type.IsArray() {
- e.expr(k.note(n, "fixed-array-index-of"), n.Left)
+ if n.Left().Type().IsArray() {
+ e.expr(k.note(n, "fixed-array-index-of"), n.Left())
} else {
// TODO(mdempsky): Fix the "why" reason text.
- e.expr(k.deref(n, "dot of pointer"), n.Left)
+ e.expr(k.deref(n, "dot of pointer"), n.Left())
}
- e.discard(n.Right)
+ e.discard(n.Right())
case ir.OINDEXMAP:
- e.discard(n.Left)
- e.discard(n.Right)
+ e.discard(n.Left())
+ e.discard(n.Right())
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
- e.expr(k.note(n, "slice"), n.Left)
+ e.expr(k.note(n, "slice"), n.Left())
low, high, max := n.SliceBounds()
e.discard(low)
e.discard(high)
e.discard(max)
case ir.OCONV, ir.OCONVNOP:
- if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() {
+ if checkPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.Left().Type().IsPtr() {
// When -d=checkptr=2 is enabled, treat
// conversions to unsafe.Pointer as an
// escaping operation. This allows better
// runtime instrumentation, since we can more
// easily detect object boundaries on the heap
// than the stack.
- e.assignHeap(n.Left, "conversion to unsafe.Pointer", n)
- } else if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() {
- e.unsafeValue(k, n.Left)
+ e.assignHeap(n.Left(), "conversion to unsafe.Pointer", n)
+ } else if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() {
+ e.unsafeValue(k, n.Left())
} else {
- e.expr(k, n.Left)
+ e.expr(k, n.Left())
}
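// E.g. (illustrative): with -d=checkptr=2, "unsafe.Pointer(p)" sends
// p's pointee to the heap via assignHeap, while "unsafe.Pointer(u)"
// for a uintptr u is routed through unsafeValue instead.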
case ir.OCONVIFACE:
- if !n.Left.Type.IsInterface() && !isdirectiface(n.Left.Type) {
+ if !n.Left().Type().IsInterface() && !isdirectiface(n.Left().Type()) {
k = e.spill(k, n)
}
- e.expr(k.note(n, "interface-converted"), n.Left)
+ e.expr(k.note(n, "interface-converted"), n.Left())
case ir.ORECV:
- e.discard(n.Left)
+ e.discard(n.Left())
case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY:
e.call([]EscHole{k}, n, nil)
case ir.OMAKESLICE:
e.spill(k, n)
- e.discard(n.Left)
- e.discard(n.Right)
+ e.discard(n.Left())
+ e.discard(n.Right())
case ir.OMAKECHAN:
- e.discard(n.Left)
+ e.discard(n.Left())
case ir.OMAKEMAP:
e.spill(k, n)
- e.discard(n.Left)
+ e.discard(n.Left())
case ir.ORECOVER:
// nop
}
paramK := e.tagHole(ks, ir.AsNode(m.Nname), m.Type.Recv())
- e.expr(e.teeHole(paramK, closureK), n.Left)
+ e.expr(e.teeHole(paramK, closureK), n.Left())
case ir.OPTRLIT:
- e.expr(e.spill(k, n), n.Left)
+ e.expr(e.spill(k, n), n.Left())
case ir.OARRAYLIT:
- for _, elt := range n.List.Slice() {
- if elt.Op == ir.OKEY {
- elt = elt.Right
+ for _, elt := range n.List().Slice() {
+ if elt.Op() == ir.OKEY {
+ elt = elt.Right()
}
e.expr(k.note(n, "array literal element"), elt)
}
k = e.spill(k, n)
k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
- for _, elt := range n.List.Slice() {
- if elt.Op == ir.OKEY {
- elt = elt.Right
+ for _, elt := range n.List().Slice() {
+ if elt.Op() == ir.OKEY {
+ elt = elt.Right()
}
e.expr(k.note(n, "slice-literal-element"), elt)
}
case ir.OSTRUCTLIT:
- for _, elt := range n.List.Slice() {
- e.expr(k.note(n, "struct literal element"), elt.Left)
+ for _, elt := range n.List().Slice() {
+ e.expr(k.note(n, "struct literal element"), elt.Left())
}
case ir.OMAPLIT:
e.spill(k, n)
// Map keys and values are always stored in the heap.
- for _, elt := range n.List.Slice() {
- e.assignHeap(elt.Left, "map literal key", n)
- e.assignHeap(elt.Right, "map literal value", n)
+ for _, elt := range n.List().Slice() {
+ e.assignHeap(elt.Left(), "map literal key", n)
+ e.assignHeap(elt.Right(), "map literal value", n)
}
case ir.OCLOSURE:
k = e.spill(k, n)
// Link addresses of captured variables to closure.
- for _, v := range n.Func.ClosureVars.Slice() {
- if v.Op == ir.OXXX { // unnamed out argument; see dcl.go:/^funcargs
+ for _, v := range n.Func().ClosureVars.Slice() {
+ if v.Op() == ir.OXXX { // unnamed out argument; see dcl.go:/^funcargs
continue
}
k := k
- if !v.Name.Byval() {
+ if !v.Name().Byval() {
k = k.addr(v, "reference")
}
- e.expr(k.note(n, "captured by a closure"), v.Name.Defn)
+ e.expr(k.note(n, "captured by a closure"), v.Name().Defn)
}
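// E.g. (illustrative): in "x := 0; f := func() { x++ }", x is captured
// by reference, so the k.addr edge above links &x to the closure and x
// escapes if f does.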
case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
e.spill(k, n)
- e.discard(n.Left)
+ e.discard(n.Left())
case ir.OADDSTR:
e.spill(k, n)
// Arguments of OADDSTR never escape;
// runtime.concatstrings makes sure of that.
- e.discards(n.List)
+ e.discards(n.List())
}
}
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
// for conversions from an unsafe.Pointer.
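// For example (illustrative, not from this CL): in
// "uintptr(unsafe.Pointer(p)) + 8", the pointer p keeps flowing to k,
// while in "u << n" the shift count n is discarded (see #32959).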
func (e *Escape) unsafeValue(k EscHole, n *ir.Node) {
- if n.Type.Etype != types.TUINTPTR {
- base.Fatalf("unexpected type %v for %v", n.Type, n)
+ if n.Type().Etype != types.TUINTPTR {
+ base.Fatalf("unexpected type %v for %v", n.Type(), n)
}
- e.stmts(n.Ninit)
+ e.stmts(n.Init())
- switch n.Op {
+ switch n.Op() {
case ir.OCONV, ir.OCONVNOP:
- if n.Left.Type.IsUnsafePtr() {
- e.expr(k, n.Left)
+ if n.Left().Type().IsUnsafePtr() {
+ e.expr(k, n.Left())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
case ir.ODOTPTR:
if isReflectHeaderDataField(n) {
- e.expr(k.deref(n, "reflect.Header.Data"), n.Left)
+ e.expr(k.deref(n, "reflect.Header.Data"), n.Left())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
case ir.OPLUS, ir.ONEG, ir.OBITNOT:
- e.unsafeValue(k, n.Left)
+ e.unsafeValue(k, n.Left())
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
- e.unsafeValue(k, n.Left)
- e.unsafeValue(k, n.Right)
+ e.unsafeValue(k, n.Left())
+ e.unsafeValue(k, n.Right())
case ir.OLSH, ir.ORSH:
- e.unsafeValue(k, n.Left)
+ e.unsafeValue(k, n.Left())
// RHS need not be uintptr-typed (#32959) and can't meaningfully
// flow pointers anyway.
- e.discard(n.Right)
+ e.discard(n.Right())
default:
e.exprSkipInit(e.discardHole(), n)
}
k := e.heapHole()
- switch n.Op {
+ switch n.Op() {
default:
base.Fatalf("unexpected addr: %v", n)
case ir.ONAME:
}
k = e.oldLoc(n).asHole()
case ir.ODOT:
- k = e.addr(n.Left)
+ k = e.addr(n.Left())
case ir.OINDEX:
- e.discard(n.Right)
- if n.Left.Type.IsArray() {
- k = e.addr(n.Left)
+ e.discard(n.Right())
+ if n.Left().Type().IsArray() {
+ k = e.addr(n.Left())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
case ir.ODEREF, ir.ODOTPTR:
e.discard(n)
case ir.OINDEXMAP:
- e.discard(n.Left)
- e.assignHeap(n.Right, "key of map put", n)
+ e.discard(n.Left())
+ e.assignHeap(n.Right(), "key of map put", n)
}
- if !n.Type.HasPointers() {
+ if !n.Type().HasPointers() {
k = e.discardHole()
}
// Filter out some no-op assignments for escape analysis.
ignore := dst != nil && src != nil && isSelfAssign(dst, src)
if ignore && base.Flag.LowerM != 0 {
- base.WarnfAt(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
+ base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
}
k := e.addr(dst)
- if dst != nil && dst.Op == ir.ODOTPTR && isReflectHeaderDataField(dst) {
+ if dst != nil && dst.Op() == ir.ODOTPTR && isReflectHeaderDataField(dst) {
e.unsafeValue(e.heapHole().note(where, why), src)
} else {
if ignore {
// should contain the holes representing where the function callee's
// results flows; where is the OGO/ODEFER context of the call, if any.
func (e *Escape) call(ks []EscHole, call, where *ir.Node) {
- topLevelDefer := where != nil && where.Op == ir.ODEFER && e.loopDepth == 1
+ topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1
if topLevelDefer {
// force stack allocation of defer record, unless
// open-coded defers are used (see ssa.go)
- where.Esc = EscNever
+ where.SetEsc(EscNever)
}
argument := func(k EscHole, arg *ir.Node) {
e.expr(k.note(call, "call parameter"), arg)
}
- switch call.Op {
+ switch call.Op() {
default:
- base.Fatalf("unexpected call op: %v", call.Op)
+ base.Fatalf("unexpected call op: %v", call.Op())
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
fixVariadicCall(call)
// Pick out the function callee, if statically known.
var fn *ir.Node
- switch call.Op {
+ switch call.Op() {
case ir.OCALLFUNC:
- switch v := staticValue(call.Left); {
- case v.Op == ir.ONAME && v.Class() == ir.PFUNC:
+ switch v := staticValue(call.Left()); {
+ case v.Op() == ir.ONAME && v.Class() == ir.PFUNC:
fn = v
- case v.Op == ir.OCLOSURE:
- fn = v.Func.Nname
+ case v.Op() == ir.OCLOSURE:
+ fn = v.Func().Nname
}
case ir.OCALLMETH:
- fn = methodExprName(call.Left)
+ fn = methodExprName(call.Left())
}
- fntype := call.Left.Type
+ fntype := call.Left().Type()
if fn != nil {
- fntype = fn.Type
+ fntype = fn.Type()
}
if ks != nil && fn != nil && e.inMutualBatch(fn) {
- for i, result := range fn.Type.Results().FieldSlice() {
+ for i, result := range fn.Type().Results().FieldSlice() {
e.expr(ks[i], ir.AsNode(result.Nname))
}
}
if r := fntype.Recv(); r != nil {
- argument(e.tagHole(ks, fn, r), call.Left.Left)
+ argument(e.tagHole(ks, fn, r), call.Left().Left())
} else {
// Evaluate callee function expression.
- argument(e.discardHole(), call.Left)
+ argument(e.discardHole(), call.Left())
}
- args := call.List.Slice()
+ args := call.List().Slice()
for i, param := range fntype.Params().FieldSlice() {
argument(e.tagHole(ks, fn, param), args[i])
}
case ir.OAPPEND:
- args := call.List.Slice()
+ args := call.List().Slice()
// Appendee slice may flow directly to the result, if
// it has enough capacity. Alternatively, a new heap
// slice might be allocated, and all slice elements
// might flow to heap.
appendeeK := ks[0]
- if args[0].Type.Elem().HasPointers() {
+ if args[0].Type().Elem().HasPointers() {
appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
}
argument(appendeeK, args[0])
if call.IsDDD() {
appendedK := e.discardHole()
- if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() {
+ if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
appendedK = e.heapHole().deref(call, "appended slice...")
}
argument(appendedK, args[1])
}
case ir.OCOPY:
- argument(e.discardHole(), call.Left)
+ argument(e.discardHole(), call.Left())
copiedK := e.discardHole()
- if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() {
+ if call.Right().Type().IsSlice() && call.Right().Type().Elem().HasPointers() {
copiedK = e.heapHole().deref(call, "copied slice")
}
- argument(copiedK, call.Right)
+ argument(copiedK, call.Right())
case ir.OPANIC:
- argument(e.heapHole(), call.Left)
+ argument(e.heapHole(), call.Left())
case ir.OCOMPLEX:
- argument(e.discardHole(), call.Left)
- argument(e.discardHole(), call.Right)
+ argument(e.discardHole(), call.Left())
+ argument(e.discardHole(), call.Right())
case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
- for _, arg := range call.List.Slice() {
+ for _, arg := range call.List().Slice() {
argument(e.discardHole(), arg)
}
case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
- argument(e.discardHole(), call.Left)
+ argument(e.discardHole(), call.Left())
}
}
// should be incorporated directly into the flow graph instead of
// relying on its escape analysis tagging.
func (e *Escape) inMutualBatch(fn *ir.Node) bool {
- if fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged {
- if fn.Name.Defn.Esc == EscFuncUnknown {
+ if fn.Name().Defn != nil && fn.Name().Defn.Esc() < EscFuncTagged {
+ if fn.Name().Defn.Esc() == EscFuncUnknown {
base.Fatalf("graph inconsistency")
}
return true
// canonicalNode returns the canonical *Node that n logically
// represents.
func canonicalNode(n *ir.Node) *ir.Node {
- if n != nil && n.Op == ir.ONAME && n.Name.IsClosureVar() {
- n = n.Name.Defn
- if n.Name.IsClosureVar() {
+ if n != nil && n.Op() == ir.ONAME && n.Name().IsClosureVar() {
+ n = n.Name().Defn
+ if n.Name().IsClosureVar() {
base.Fatalf("still closure var")
}
}
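// (So the captured copy inside a closure and the outer variable it
// refers to share a single escape-analysis location; e.g. a captured
// x canonicalizes to the outer x itself.)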
if e.curfn == nil {
base.Fatalf("e.curfn isn't set")
}
- if n != nil && n.Type != nil && n.Type.NotInHeap() {
- base.ErrorfAt(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type)
+ if n != nil && n.Type() != nil && n.Type().NotInHeap() {
+ base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
}
n = canonicalNode(n)
}
e.allLocs = append(e.allLocs, loc)
if n != nil {
- if n.Op == ir.ONAME && n.Name.Curfn != e.curfn {
- base.Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn)
+ if n.Op() == ir.ONAME && n.Name().Curfn != e.curfn {
+ base.Fatalf("curfn mismatch: %v != %v", n.Name().Curfn, e.curfn)
}
if n.HasOpt() {
}
if dst.escapes && k.derefs < 0 { // dst = &src
if base.Flag.LowerM >= 2 || logopt.Enabled() {
- pos := base.FmtPos(src.n.Pos)
+ pos := base.FmtPos(src.n.Pos())
if base.Flag.LowerM >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
}
explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
if logopt.Enabled() {
- logopt.LogOpt(src.n.Pos, "escapes", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
+ logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
}
}
if l.isName(ir.PPARAM) {
if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos), l.n, e.explainLoc(root), derefs)
+ fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, e.explainLoc(root), derefs)
}
explanation := e.explainPath(root, l)
if logopt.Enabled() {
- logopt.LogOpt(l.n.Pos, "leak", "escape", ir.FuncName(e.curfn),
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e.curfn),
fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), derefs), explanation)
}
}
if addressOf && !l.escapes {
if logopt.Enabled() || base.Flag.LowerM >= 2 {
if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos), l.n)
+ fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
}
explanation := e.explainPath(root, l)
if logopt.Enabled() {
- logopt.LogOpt(l.n.Pos, "escape", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+ logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
}
}
l.escapes = true
// explainPath prints an explanation of how src flows to the walk root.
func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
visited := make(map[*EscLocation]bool)
- pos := base.FmtPos(src.n.Pos)
+ pos := base.FmtPos(src.n.Pos())
var explanation []*logopt.LoggedOpt
for {
// Prevent infinite loop.
if logopt.Enabled() {
var epos src.XPos
if notes != nil {
- epos = notes.where.Pos
+ epos = notes.where.Pos()
} else if srcloc != nil && srcloc.n != nil {
- epos = srcloc.n.Pos
+ epos = srcloc.n.Pos()
}
explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e.curfn), flow))
}
for note := notes; note != nil; note = note.next {
if print {
- fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos))
+ fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
}
if logopt.Enabled() {
- explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos, "escflow", "escape", ir.FuncName(e.curfn),
+ explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e.curfn),
fmt.Sprintf(" from %v (%v)", note.where, note.why)))
}
}
// TODO(mdempsky): Omit entirely.
return "{temp}"
}
- if l.n.Op == ir.ONAME {
+ if l.n.Op() == ir.ONAME {
return fmt.Sprintf("%v", l.n)
}
return fmt.Sprintf("{storage for %v}", l.n)
//
// var u int // okay to stack allocate
// *(func() *int { return &u }()) = 42
- if containsClosure(other.curfn, l.curfn) && l.curfn.Func.ClosureCalled {
+ if containsClosure(other.curfn, l.curfn) && l.curfn.Func().ClosureCalled {
return false
}
// containsClosure reports whether c is a closure contained within f.
func containsClosure(f, c *ir.Node) bool {
- if f.Op != ir.ODCLFUNC || c.Op != ir.ODCLFUNC {
+ if f.Op() != ir.ODCLFUNC || c.Op() != ir.ODCLFUNC {
base.Fatalf("bad containsClosure: %v, %v", f, c)
}
// Closures within function Foo are named like "Foo.funcN..."
// TODO(mdempsky): Better way to recognize this.
- fn := f.Func.Nname.Sym.Name
- cn := c.Func.Nname.Sym.Name
+ fn := f.Func().Nname.Sym().Name
+ cn := c.Func().Nname.Sym().Name
return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
}
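// E.g. (illustrative): with f named "Foo" and c named "Foo.func1",
// containsClosure reports true; a sibling "Foobar.func1" does not
// match, because the byte after the prefix must be '.'.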
// into the escape analysis tag, then record a return leak.
if sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
// TODO(mdempsky): Eliminate dependency on Vargen here.
- ri := int(sink.n.Name.Vargen) - 1
+ ri := int(sink.n.Name().Vargen) - 1
if ri < numEscResults {
// Leak to result parameter.
l.paramEsc.AddResult(ri, derefs)
func (e *Escape) finish(fns []*ir.Node) {
// Record parameter tags for package export data.
for _, fn := range fns {
- fn.Esc = EscFuncTagged
+ fn.SetEsc(EscFuncTagged)
narg := 0
for _, fs := range &types.RecvsParams {
- for _, f := range fs(fn.Type).Fields().Slice() {
+ for _, f := range fs(fn.Type()).Fields().Slice() {
narg++
f.Note = e.paramTag(fn, narg, f)
}
// Update n.Esc based on escape analysis results.
if loc.escapes {
- if n.Op != ir.ONAME {
+ if n.Op() != ir.ONAME {
if base.Flag.LowerM != 0 {
- base.WarnfAt(n.Pos, "%S escapes to heap", n)
+ base.WarnfAt(n.Pos(), "%S escapes to heap", n)
}
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "escape", "escape", ir.FuncName(e.curfn))
+ logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e.curfn))
}
}
- n.Esc = EscHeap
+ n.SetEsc(EscHeap)
addrescapes(n)
} else {
- if base.Flag.LowerM != 0 && n.Op != ir.ONAME {
- base.WarnfAt(n.Pos, "%S does not escape", n)
+ if base.Flag.LowerM != 0 && n.Op() != ir.ONAME {
+ base.WarnfAt(n.Pos(), "%S does not escape", n)
}
- n.Esc = EscNone
+ n.SetEsc(EscNone)
if loc.transient {
n.SetTransient(true)
}
}
func (l *EscLocation) isName(c ir.Class) bool {
- return l.n != nil && l.n.Op == ir.ONAME && l.n.Class() == c
+ return l.n != nil && l.n.Op() == ir.ONAME && l.n.Class() == c
}
const numEscResults = 7
// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
func funcSym(fn *ir.Node) *types.Sym {
- if fn == nil || fn.Func.Nname == nil {
+ if fn == nil || fn.Func().Nname == nil {
return nil
}
- return fn.Func.Nname.Sym
+ return fn.Func().Nname.Sym()
}
// Mark labels that have no backjumps to them as not increasing e.loopdepth.
// when we evaluate it for dst and for src.
// dst is ONAME dereference.
- if dst.Op != ir.ODEREF && dst.Op != ir.ODOTPTR || dst.Left.Op != ir.ONAME {
+ if dst.Op() != ir.ODEREF && dst.Op() != ir.ODOTPTR || dst.Left().Op() != ir.ONAME {
return false
}
// src is a slice operation.
- switch src.Op {
+ switch src.Op() {
case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
// OK.
case ir.OSLICEARR, ir.OSLICE3ARR:
// Pointer to an array is OK since it's not stored inside b directly.
// For slicing an array (not pointer to array), there is an implicit OADDR.
// We check that to determine non-pointer array slicing.
- if src.Left.Op == ir.OADDR {
+ if src.Left().Op() == ir.OADDR {
return false
}
default:
return false
}
// slice is applied to ONAME dereference.
- if src.Left.Op != ir.ODEREF && src.Left.Op != ir.ODOTPTR || src.Left.Left.Op != ir.ONAME {
+ if src.Left().Op() != ir.ODEREF && src.Left().Op() != ir.ODOTPTR || src.Left().Left().Op() != ir.ONAME {
return false
}
// dst and src reference the same base ONAME.
- return dst.Left == src.Left.Left
+ return dst.Left() == src.Left().Left()
}
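// E.g. (illustrative): "b.buf = b.buf[m:n]" satisfies all three checks
// above, so the reslice is treated as a self-assignment.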
// isSelfAssign reports whether assignment from src to dst can
//
// These assignments do not change assigned object lifetime.
- if dst == nil || src == nil || dst.Op != src.Op {
+ if dst == nil || src == nil || dst.Op() != src.Op() {
return false
}
- switch dst.Op {
+ switch dst.Op() {
case ir.ODOT, ir.ODOTPTR:
// Safe trailing accessors that are permitted to differ.
case ir.OINDEX:
- if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) {
+ if mayAffectMemory(dst.Right()) || mayAffectMemory(src.Right()) {
return false
}
default:
}
// The expression prefix must be both "safe" and identical.
- return samesafeexpr(dst.Left, src.Left)
+ return samesafeexpr(dst.Left(), src.Left())
}
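// E.g. (illustrative): "val.x = val.y" and "val.a[i] = val.a[j]" (with
// side-effect-free i and j) are self-assignments, while
// "val.a[f()] = val.a[g()]" is not, since f and g may affect memory.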
// mayAffectMemory reports whether evaluation of n may affect the program's
//
// We're ignoring things like division by zero, index out of range,
// and nil pointer dereference here.
- switch n.Op {
+ switch n.Op() {
case ir.ONAME, ir.OCLOSUREVAR, ir.OLITERAL, ir.ONIL:
return false
// Left+Right group.
case ir.OINDEX, ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
- return mayAffectMemory(n.Left) || mayAffectMemory(n.Right)
+ return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right())
// Left group.
case ir.ODOT, ir.ODOTPTR, ir.ODEREF, ir.OCONVNOP, ir.OCONV, ir.OLEN, ir.OCAP,
ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
- return mayAffectMemory(n.Left)
+ return mayAffectMemory(n.Left())
default:
return true
// heapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it doesn't.
func heapAllocReason(n *ir.Node) string {
- if n.Type == nil {
+ if n.Type() == nil {
return ""
}
// Parameters are always passed via the stack.
- if n.Op == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) {
+ if n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) {
return ""
}
- if n.Type.Width > maxStackVarSize {
+ if n.Type().Width > maxStackVarSize {
return "too large for stack"
}
- if (n.Op == ir.ONEW || n.Op == ir.OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
+ if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= maxImplicitStackVarSize {
return "too large for stack"
}
- if n.Op == ir.OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
+ if n.Op() == ir.OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
return "too large for stack"
}
- if n.Op == ir.OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
+ if n.Op() == ir.OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
return "too large for stack"
}
- if n.Op == ir.OMAKESLICE {
- r := n.Right
+ if n.Op() == ir.OMAKESLICE {
+ r := n.Right()
if r == nil {
- r = n.Left
+ r = n.Left()
}
if !smallintconst(r) {
return "non-constant size"
}
- if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
+ if t := n.Type(); t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
return "too large for stack"
}
}
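// E.g. (illustrative): "make([]byte, n)" with non-constant n reports
// "non-constant size", and "make([]int64, 1<<20)" reports "too large
// for stack" once its total size crosses maxImplicitStackVarSize.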
// Storage is allocated as necessary to allow the address
// to be taken.
func addrescapes(n *ir.Node) {
- switch n.Op {
+ switch n.Op() {
default:
// Unexpected Op, probably due to a previous type error. Ignore.
// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
// on PPARAM it means something different.
- if n.Class() == ir.PAUTO && n.Esc == EscNever {
+ if n.Class() == ir.PAUTO && n.Esc() == EscNever {
break
}
// If a closure reference escapes, mark the outer variable as escaping.
- if n.Name.IsClosureVar() {
- addrescapes(n.Name.Defn)
+ if n.Name().IsClosureVar() {
+ addrescapes(n.Name().Defn)
break
}
// then we're analyzing the inner closure but we need to move x to the
// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
oldfn := Curfn
- Curfn = n.Name.Curfn
- if Curfn.Op == ir.OCLOSURE {
- Curfn = Curfn.Func.Decl
+ Curfn = n.Name().Curfn
+ if Curfn.Op() == ir.OCLOSURE {
+ Curfn = Curfn.Func().Decl
panic("can't happen")
}
ln := base.Pos
- base.Pos = Curfn.Pos
+ base.Pos = Curfn.Pos()
moveToHeap(n)
Curfn = oldfn
base.Pos = ln
// escape--the pointer inside x does, but that
// is always a heap pointer anyway.
case ir.ODOT, ir.OINDEX, ir.OPAREN, ir.OCONVNOP:
- if !n.Left.Type.IsSlice() {
- addrescapes(n.Left)
+ if !n.Left().Type().IsSlice() {
+ addrescapes(n.Left())
}
}
}
// Allocate a local stack variable to hold the pointer to the heap copy.
// temp will add it to the function declaration list automatically.
- heapaddr := temp(types.NewPtr(n.Type))
- heapaddr.Sym = lookup("&" + n.Sym.Name)
- heapaddr.Orig.Sym = heapaddr.Sym
- heapaddr.Pos = n.Pos
+ heapaddr := temp(types.NewPtr(n.Type()))
+ heapaddr.SetSym(lookup("&" + n.Sym().Name))
+ heapaddr.Orig().SetSym(heapaddr.Sym())
+ heapaddr.SetPos(n.Pos())
// Unset AutoTemp to persist the &foo variable name through SSA to
// liveness analysis.
// TODO(mdempsky/drchase): Cleaner solution?
- heapaddr.Name.SetAutoTemp(false)
+ heapaddr.Name().SetAutoTemp(false)
// Parameters have a local stack copy used at function start/end
// in addition to the copy in the heap that may live longer than
// the function.
if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
- if n.Xoffset == types.BADWIDTH {
+ if n.Offset() == types.BADWIDTH {
base.Fatalf("addrescapes before param assignment")
}
// Preserve a copy so we can still write code referring to the original,
// and substitute that copy into the function declaration list
// so that analyses of the local (on-stack) variables use it.
- stackcopy := NewName(n.Sym)
- stackcopy.Type = n.Type
- stackcopy.Xoffset = n.Xoffset
+ stackcopy := NewName(n.Sym())
+ stackcopy.SetType(n.Type())
+ stackcopy.SetOffset(n.Offset())
stackcopy.SetClass(n.Class())
- stackcopy.Name.Param.Heapaddr = heapaddr
+ stackcopy.Name().Param.Heapaddr = heapaddr
if n.Class() == ir.PPARAMOUT {
// Make sure the pointer to the heap copy is kept live throughout the function.
// The function could panic at any point, and then a defer could recover.
// Thus, we need the pointer to the heap copy always available so the
// post-deferreturn code can copy the return value back to the stack.
// See issue 16095.
- heapaddr.Name.SetIsOutputParamHeapAddr(true)
+ heapaddr.Name().SetIsOutputParamHeapAddr(true)
}
- n.Name.Param.Stackcopy = stackcopy
+ n.Name().Param.Stackcopy = stackcopy
// Substitute the stackcopy into the function variable list so that
// liveness and other analyses use the underlying stack slot
// and not the now-pseudo-variable n.
found := false
- for i, d := range Curfn.Func.Dcl {
+ for i, d := range Curfn.Func().Dcl {
if d == n {
- Curfn.Func.Dcl[i] = stackcopy
+ Curfn.Func().Dcl[i] = stackcopy
found = true
break
}
if !found {
base.Fatalf("cannot find %v in local variable list", n)
}
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
}
// Modify n in place so that uses of n now mean indirection of the heapaddr.
n.SetClass(ir.PAUTOHEAP)
- n.Xoffset = 0
- n.Name.Param.Heapaddr = heapaddr
- n.Esc = EscHeap
+ n.SetOffset(0)
+ n.Name().Param.Heapaddr = heapaddr
+ n.SetEsc(EscHeap)
if base.Flag.LowerM != 0 {
- base.WarnfAt(n.Pos, "moved to heap: %v", n)
+ base.WarnfAt(n.Pos(), "moved to heap: %v", n)
}
}
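// E.g. (illustrative): for "func f() *int { var x int; return &x }",
// addrescapes moves x to the heap and, with -m, the compiler prints
// "moved to heap: x".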
return fmt.Sprintf("arg#%d", narg)
}
- if fn.Nbody.Len() == 0 {
+ if fn.Body().Len() == 0 {
// Assume that uintptr arguments must be held live across the call.
// This is most important for syscall.Syscall.
// See golang.org/issue/13372.
// External functions are assumed unsafe, unless
// //go:noescape is given before the declaration.
- if fn.Func.Pragma&ir.Noescape != 0 {
+ if fn.Func().Pragma&ir.Noescape != 0 {
if base.Flag.LowerM != 0 && f.Sym != nil {
base.WarnfAt(f.Pos, "%v does not escape", name())
}
return esc.Encode()
}
- if fn.Func.Pragma&ir.UintptrEscapes != 0 {
+ if fn.Func().Pragma&ir.UintptrEscapes != 0 {
if f.Type.IsUintptr() {
if base.Flag.LowerM != 0 {
base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
}
for i := 0; i < numEscResults; i++ {
if x := esc.Result(i); x >= 0 {
- res := fn.Type.Results().Field(i).Sym
+ res := fn.Type().Results().Field(i).Sym
base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
}
}
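// For example (illustrative, not from this CL): under the Noescape
// check above, a body-less declaration such as
//
//	//go:noescape
//	func memhash(p unsafe.Pointer, h, s uintptr) uintptr
//
// keeps its pointer parameter out of the escape graph, while the same
// declaration without the pragma would leak p to the heap.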
// exportsym marks n for export (or reexport).
func exportsym(n *ir.Node) {
- if n.Sym.OnExportList() {
+ if n.Sym().OnExportList() {
return
}
- n.Sym.SetOnExportList(true)
+ n.Sym().SetOnExportList(true)
if base.Flag.E != 0 {
- fmt.Printf("export symbol %v\n", n.Sym)
+ fmt.Printf("export symbol %v\n", n.Sym())
}
exportlist = append(exportlist, n)
}
func autoexport(n *ir.Node, ctxt ir.Class) {
- if n.Sym.Pkg != ir.LocalPkg {
+ if n.Sym().Pkg != ir.LocalPkg {
return
}
if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || dclcontext != ir.PEXTERN {
return
}
- if n.Type != nil && n.Type.IsKind(types.TFUNC) && ir.IsMethod(n) {
+ if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) {
return
}
- if types.IsExported(n.Sym.Name) || initname(n.Sym.Name) {
+ if types.IsExported(n.Sym().Name) || initname(n.Sym().Name) {
exportsym(n)
}
- if base.Flag.AsmHdr != "" && !n.Sym.Asm() {
- n.Sym.SetAsm(true)
+ if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
+ n.Sym().SetAsm(true)
asmlist = append(asmlist, n)
}
}
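// E.g. (illustrative): at package scope, "var Foo int" and "func init()"
// are exported here, while a method "func (T) M()" returns early above
// and is instead reexported along with its receiver type.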
s.SetPkgDef(ir.AsTypesNode(n))
s.Importdef = ipkg
}
- if n.Op != ir.ONONAME && n.Op != op {
+ if n.Op() != ir.ONONAME && n.Op() != op {
redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return n
// ipkg is the package being imported
func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *types.Type {
n := importsym(ipkg, s, ir.OTYPE)
- if n.Op != ir.OTYPE {
+ if n.Op() != ir.OTYPE {
t := types.New(types.TFORW)
t.Sym = s
t.Nod = ir.AsTypesNode(n)
- n.Op = ir.OTYPE
- n.Pos = pos
- n.Type = t
+ n.SetOp(ir.OTYPE)
+ n.SetPos(pos)
+ n.SetType(t)
n.SetClass(ir.PEXTERN)
}
- t := n.Type
+ t := n.Type()
if t == nil {
base.Fatalf("importtype %v", s)
}
// ipkg is the package being imported
func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Node {
n := importsym(ipkg, s, op)
- if n.Op != ir.ONONAME {
- if n.Op == op && (n.Class() != ctxt || !types.Identical(n.Type, t)) {
+ if n.Op() != ir.ONONAME {
+ if n.Op() == op && (n.Class() != ctxt || !types.Identical(n.Type(), t)) {
redeclare(base.Pos, s, fmt.Sprintf("during import %q", ipkg.Path))
}
return nil
}
- n.Op = op
- n.Pos = pos
+ n.SetOp(op)
+ n.SetPos(pos)
n.SetClass(ctxt)
if ctxt == ir.PFUNC {
- n.Sym.SetFunc(true)
+ n.Sym().SetFunc(true)
}
- n.Type = t
+ n.SetType(t)
return n
}
return
}
- n.Func = new(ir.Func)
+ n.SetFunc(new(ir.Func))
if base.Flag.E != 0 {
fmt.Printf("import func %v%S\n", s, t)
}
fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", ir.LocalPkg.Name)
for _, n := range asmlist {
- if n.Sym.IsBlank() {
+ if n.Sym().IsBlank() {
continue
}
- switch n.Op {
+ switch n.Op() {
case ir.OLITERAL:
t := n.Val().Kind()
if t == constant.Float || t == constant.Complex {
break
}
- fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val())
+ fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym().Name, n.Val())
case ir.OTYPE:
- t := n.Type
+ t := n.Type()
if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
break
}
- fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width))
+ fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Width))
for _, f := range t.Fields().Slice() {
if !f.Sym.IsBlank() {
- fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset))
+ fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
}
}
}
// isParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
func isParamStackCopy(n *ir.Node) bool {
- return n.Op == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name.Param.Heapaddr != nil
+ return n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name().Param.Heapaddr != nil
}
// isParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
func isParamHeapCopy(n *ir.Node) bool {
- return n.Op == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name.Param.Stackcopy != nil
+ return n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name().Param.Stackcopy != nil
}
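// These are the two halves produced by moveToHeap above: the stack
// PPARAM/PPARAMOUT keeps Heapaddr, and the PAUTOHEAP copy keeps
// Stackcopy pointing back at the stack slot.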
// autotmpname returns the name for an autotmp variable numbered n.
if curfn == nil {
base.Fatalf("no curfn for tempAt")
}
- if curfn.Op == ir.OCLOSURE {
+ if curfn.Op() == ir.OCLOSURE {
ir.Dump("tempAt", curfn)
base.Fatalf("adding tempAt to wrong closure function")
}
}
s := &types.Sym{
- Name: autotmpname(len(curfn.Func.Dcl)),
+ Name: autotmpname(len(curfn.Func().Dcl)),
Pkg: ir.LocalPkg,
}
n := ir.NewNameAt(pos, s)
s.Def = ir.AsTypesNode(n)
- n.Type = t
+ n.SetType(t)
n.SetClass(ir.PAUTO)
- n.Esc = EscNever
- n.Name.Curfn = curfn
- n.Name.SetUsed(true)
- n.Name.SetAutoTemp(true)
- curfn.Func.Dcl = append(curfn.Func.Dcl, n)
+ n.SetEsc(EscNever)
+ n.Name().Curfn = curfn
+ n.Name().SetUsed(true)
+ n.Name().SetAutoTemp(true)
+ curfn.Func().Dcl = append(curfn.Func().Dcl, n)
dowidth(t)
- return n.Orig
+ return n.Orig()
}
func temp(t *types.Type) *ir.Node {
pp.next = pp.NewProg()
pp.clearp(pp.next)
- pp.pos = fn.Pos
+ pp.pos = fn.Pos()
pp.settext(fn)
// PCDATA tables implicitly start with index -1.
pp.prevLive = LivenessIndex{-1, false}
ptxt := pp.Prog(obj.ATEXT)
pp.Text = ptxt
- fn.Func.LSym.Func().Text = ptxt
+ fn.Func().LSym.Func().Text = ptxt
ptxt.From.Type = obj.TYPE_MEM
ptxt.From.Name = obj.NAME_EXTERN
- ptxt.From.Sym = fn.Func.LSym
+ ptxt.From.Sym = fn.Func().LSym
}
// initLSym defines f's obj.LSym and initializes it based on the
}
if nam := f.Nname; !ir.IsBlank(nam) {
- f.LSym = nam.Sym.Linksym()
+ f.LSym = nam.Sym().Linksym()
if f.Pragma&ir.Systemstack != 0 {
f.LSym.Set(obj.AttrCFunc, true)
}
}
}
- isLinknameExported := nam.Sym.Linkname != "" && (hasBody || hasDefABI)
+ isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI)
if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
// Either 1) this symbol is definitely
// referenced as ABI0 from this package; or 2)
// See test/recover.go for test cases and src/reflect/value.go
// for the actual functions being considered.
if base.Ctxt.Pkgpath == "reflect" {
- switch f.Nname.Sym.Name {
+ switch f.Nname.Sym().Name {
case "callReflect", "callMethod":
flag |= obj.WRAPPER
}
}
func ggloblnod(nam *ir.Node) {
- s := nam.Sym.Linksym()
+ s := nam.Sym().Linksym()
s.Gotype = ngotype(nam).Linksym()
flags := 0
- if nam.Name.Readonly() {
+ if nam.Name().Readonly() {
flags = obj.RODATA
}
- if nam.Type != nil && !nam.Type.HasPointers() {
+ if nam.Type() != nil && !nam.Type().HasPointers() {
flags |= obj.NOPTR
}
- base.Ctxt.Globl(s, nam.Type.Width, flags)
- if nam.Name.LibfuzzerExtraCounter() {
+ base.Ctxt.Globl(s, nam.Type().Width, flags)
+ if nam.Name().LibfuzzerExtraCounter() {
s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
}
- if nam.Sym.Linkname != "" {
+ if nam.Sym().Linkname != "" {
// Make sure linkname'd symbol is non-package. When a symbol is
// both imported and linkname'd, s.Pkg may not be set to "_" in
// types.Sym.Linksym because LSym already exists. Set it here.
}
for n := range index {
- pkgObjs[n.Sym.Pkg] = append(pkgObjs[n.Sym.Pkg], n)
+ pkgObjs[n.Sym().Pkg] = append(pkgObjs[n.Sym().Pkg], n)
}
var pkgs []*types.Pkg
pkgs = append(pkgs, pkg)
sort.Slice(objs, func(i, j int) bool {
- return objs[i].Sym.Name < objs[j].Sym.Name
+ return objs[i].Sym().Name < objs[j].Sym().Name
})
}
objs := pkgObjs[pkg]
w.uint64(uint64(len(objs)))
for _, n := range objs {
- w.string(n.Sym.Name)
+ w.string(n.Sym().Name)
w.uint64(index[n])
}
}
// pushDecl adds n to the declaration work queue, if not already present.
func (p *iexporter) pushDecl(n *ir.Node) {
- if n.Sym == nil || ir.AsNode(n.Sym.Def) != n && n.Op != ir.OTYPE {
- base.Fatalf("weird Sym: %v, %v", n, n.Sym)
+ if n.Sym() == nil || ir.AsNode(n.Sym().Def) != n && n.Op() != ir.OTYPE {
+ base.Fatalf("weird Sym: %v, %v", n, n.Sym())
}
// Don't export predeclared declarations.
- if n.Sym.Pkg == ir.BuiltinPkg || n.Sym.Pkg == unsafepkg {
+ if n.Sym().Pkg == ir.BuiltinPkg || n.Sym().Pkg == unsafepkg {
return
}
func (p *iexporter) doDecl(n *ir.Node) {
w := p.newWriter()
- w.setPkg(n.Sym.Pkg, false)
+ w.setPkg(n.Sym().Pkg, false)
- switch n.Op {
+ switch n.Op() {
case ir.ONAME:
switch n.Class() {
case ir.PEXTERN:
// Variable.
w.tag('V')
- w.pos(n.Pos)
- w.typ(n.Type)
+ w.pos(n.Pos())
+ w.typ(n.Type())
w.varExt(n)
case ir.PFUNC:
// Function.
w.tag('F')
- w.pos(n.Pos)
- w.signature(n.Type)
+ w.pos(n.Pos())
+ w.signature(n.Type())
w.funcExt(n)
default:
// Constant.
n = typecheck(n, ctxExpr)
w.tag('C')
- w.pos(n.Pos)
- w.value(n.Type, n.Val())
+ w.pos(n.Pos())
+ w.value(n.Type(), n.Val())
case ir.OTYPE:
- if IsAlias(n.Sym) {
+ if IsAlias(n.Sym()) {
// Alias.
w.tag('A')
- w.pos(n.Pos)
- w.typ(n.Type)
+ w.pos(n.Pos())
+ w.typ(n.Type())
break
}
// Defined type.
w.tag('T')
- w.pos(n.Pos)
+ w.pos(n.Pos())
- underlying := n.Type.Orig
+ underlying := n.Type().Orig
if underlying == types.Errortype.Orig {
// For "type T error", use error as the
// underlying type instead of error's own
// underlying anonymous interface.
}
w.typ(underlying)
- t := n.Type
+ t := n.Type()
if t.IsInterface() {
w.typeExt(t)
break
w := p.newWriter()
w.setPkg(fnpkg(f), false)
- w.stmtList(ir.AsNodes(f.Func.Inl.Body))
+ w.stmtList(ir.AsNodes(f.Func().Inl.Body))
p.inlineIndex[f] = w.flush()
}
// Ensure any referenced declarations are written out too.
w.p.pushDecl(n)
- s := n.Sym
+ s := n.Sym()
w.string(s.Name)
w.pkg(s.Pkg)
}
// Compiler-specific extensions.
func (w *exportWriter) varExt(n *ir.Node) {
- w.linkname(n.Sym)
- w.symIdx(n.Sym)
+ w.linkname(n.Sym())
+ w.symIdx(n.Sym())
}
func (w *exportWriter) funcExt(n *ir.Node) {
- w.linkname(n.Sym)
- w.symIdx(n.Sym)
+ w.linkname(n.Sym())
+ w.symIdx(n.Sym())
// Escape analysis.
for _, fs := range &types.RecvsParams {
- for _, f := range fs(n.Type).FieldSlice() {
+ for _, f := range fs(n.Type()).FieldSlice() {
w.string(f.Note)
}
}
// Inline body.
- if n.Func.Inl != nil {
- w.uint64(1 + uint64(n.Func.Inl.Cost))
- if n.Func.ExportInline() {
+ if n.Func().Inl != nil {
+ w.uint64(1 + uint64(n.Func().Inl.Cost))
+ if n.Func().ExportInline() {
w.p.doInline(n)
}
// Endlineno for inlined function.
- if n.Name.Defn != nil {
- w.pos(n.Name.Defn.Func.Endlineno)
+ if n.Name().Defn != nil {
+ w.pos(n.Name().Defn.Func().Endlineno)
} else {
// When the exported node was defined externally,
// e.g. io exports atomic.(*Value).Load or bytes exports errors.New.
// Keep it as we don't distinguish this case in iimport.go.
- w.pos(n.Func.Endlineno)
+ w.pos(n.Func().Endlineno)
}
} else {
w.uint64(0)
}
func (w *exportWriter) node(n *ir.Node) {
- if ir.OpPrec[n.Op] < 0 {
+ if ir.OpPrec[n.Op()] < 0 {
w.stmt(n)
} else {
w.expr(n)
// Caution: stmt will emit more than one node for statement nodes n that have a non-empty
// n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.).
func (w *exportWriter) stmt(n *ir.Node) {
- if n.Ninit.Len() > 0 && !ir.StmtWithInit(n.Op) {
+ if n.Init().Len() > 0 && !ir.StmtWithInit(n.Op()) {
// can't use stmtList here since we don't want the final OEND
- for _, n := range n.Ninit.Slice() {
+ for _, n := range n.Init().Slice() {
w.stmt(n)
}
}
- switch op := n.Op; op {
+ switch op := n.Op(); op {
case ir.ODCL:
w.op(ir.ODCL)
- w.pos(n.Left.Pos)
- w.localName(n.Left)
- w.typ(n.Left.Type)
+ w.pos(n.Left().Pos())
+ w.localName(n.Left())
+ w.typ(n.Left().Type())
// case ODCLFIELD:
// unimplemented - handled by default case
// Don't export "v = <N>" initializing statements, hope they're always
// preceded by the DCL which will be re-parsed and typecheck to reproduce
// the "v = <N>" again.
- if n.Right != nil {
+ if n.Right() != nil {
w.op(ir.OAS)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.expr(n.Right)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
}
case ir.OASOP:
w.op(ir.OASOP)
- w.pos(n.Pos)
+ w.pos(n.Pos())
w.op(n.SubOp())
- w.expr(n.Left)
+ w.expr(n.Left())
if w.bool(!n.Implicit()) {
- w.expr(n.Right)
+ w.expr(n.Right())
}
case ir.OAS2:
w.op(ir.OAS2)
- w.pos(n.Pos)
- w.exprList(n.List)
- w.exprList(n.Rlist)
+ w.pos(n.Pos())
+ w.exprList(n.List())
+ w.exprList(n.Rlist())
case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
w.op(ir.OAS2)
- w.pos(n.Pos)
- w.exprList(n.List)
- w.exprList(ir.AsNodes([]*ir.Node{n.Right}))
+ w.pos(n.Pos())
+ w.exprList(n.List())
+ w.exprList(ir.AsNodes([]*ir.Node{n.Right()}))
case ir.ORETURN:
w.op(ir.ORETURN)
- w.pos(n.Pos)
- w.exprList(n.List)
+ w.pos(n.Pos())
+ w.exprList(n.List())
// case ORETJMP:
// unreachable - generated by compiler for trampoline routines
case ir.OGO, ir.ODEFER:
w.op(op)
- w.pos(n.Pos)
- w.expr(n.Left)
+ w.pos(n.Pos())
+ w.expr(n.Left())
case ir.OIF:
w.op(ir.OIF)
- w.pos(n.Pos)
- w.stmtList(n.Ninit)
- w.expr(n.Left)
- w.stmtList(n.Nbody)
- w.stmtList(n.Rlist)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.expr(n.Left())
+ w.stmtList(n.Body())
+ w.stmtList(n.Rlist())
case ir.OFOR:
w.op(ir.OFOR)
- w.pos(n.Pos)
- w.stmtList(n.Ninit)
- w.exprsOrNil(n.Left, n.Right)
- w.stmtList(n.Nbody)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.exprsOrNil(n.Left(), n.Right())
+ w.stmtList(n.Body())
case ir.ORANGE:
w.op(ir.ORANGE)
- w.pos(n.Pos)
- w.stmtList(n.List)
- w.expr(n.Right)
- w.stmtList(n.Nbody)
+ w.pos(n.Pos())
+ w.stmtList(n.List())
+ w.expr(n.Right())
+ w.stmtList(n.Body())
case ir.OSELECT, ir.OSWITCH:
w.op(op)
- w.pos(n.Pos)
- w.stmtList(n.Ninit)
- w.exprsOrNil(n.Left, nil)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.exprsOrNil(n.Left(), nil)
w.caseList(n)
// case OCASE:
case ir.OFALL:
w.op(ir.OFALL)
- w.pos(n.Pos)
+ w.pos(n.Pos())
case ir.OBREAK, ir.OCONTINUE:
w.op(op)
- w.pos(n.Pos)
- w.exprsOrNil(n.Left, nil)
+ w.pos(n.Pos())
+ w.exprsOrNil(n.Left(), nil)
case ir.OEMPTY:
// nothing to emit
case ir.OGOTO, ir.OLABEL:
w.op(op)
- w.pos(n.Pos)
- w.string(n.Sym.Name)
+ w.pos(n.Pos())
+ w.string(n.Sym().Name)
default:
- base.Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op)
+ base.Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op())
}
}
func (w *exportWriter) caseList(sw *ir.Node) {
- namedTypeSwitch := sw.Op == ir.OSWITCH && sw.Left != nil && sw.Left.Op == ir.OTYPESW && sw.Left.Left != nil
+ namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil
- cases := sw.List.Slice()
+ cases := sw.List().Slice()
w.uint64(uint64(len(cases)))
for _, cas := range cases {
- if cas.Op != ir.OCASE {
+ if cas.Op() != ir.OCASE {
base.Fatalf("expected OCASE, got %v", cas)
}
- w.pos(cas.Pos)
- w.stmtList(cas.List)
+ w.pos(cas.Pos())
+ w.stmtList(cas.List())
if namedTypeSwitch {
- w.localName(cas.Rlist.First())
+ w.localName(cas.Rlist().First())
}
- w.stmtList(cas.Nbody)
+ w.stmtList(cas.Body())
}
}
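// E.g. (illustrative): for "switch v := x.(type)", namedTypeSwitch is
// true, and the per-case redeclaration of v is what localName writes
// above.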
// }
// from exprfmt (fmt.go)
- for n.Op == ir.OPAREN || n.Implicit() && (n.Op == ir.ODEREF || n.Op == ir.OADDR || n.Op == ir.ODOT || n.Op == ir.ODOTPTR) {
- n = n.Left
+ for n.Op() == ir.OPAREN || n.Implicit() && (n.Op() == ir.ODEREF || n.Op() == ir.OADDR || n.Op() == ir.ODOT || n.Op() == ir.ODOTPTR) {
+ n = n.Left()
}
- switch op := n.Op; op {
+ switch op := n.Op(); op {
// expressions
// (somewhat closely following the structure of exprfmt in fmt.go)
case ir.ONIL:
- if !n.Type.HasNil() {
- base.Fatalf("unexpected type for nil: %v", n.Type)
+ if !n.Type().HasNil() {
+ base.Fatalf("unexpected type for nil: %v", n.Type())
}
- if n.Orig != nil && n.Orig != n {
- w.expr(n.Orig)
+ if n.Orig() != nil && n.Orig() != n {
+ w.expr(n.Orig())
break
}
w.op(ir.OLITERAL)
- w.pos(n.Pos)
- w.typ(n.Type)
+ w.pos(n.Pos())
+ w.typ(n.Type())
case ir.OLITERAL:
w.op(ir.OLITERAL)
- w.pos(n.Pos)
- w.value(n.Type, n.Val())
+ w.pos(n.Pos())
+ w.value(n.Type(), n.Val())
case ir.OMETHEXPR:
// Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
// but for export, this should be rendered as (*pkg.T).method.
// These nodes have the special property that they are names with a left OTYPE and a right ONAME.
w.op(ir.OXDOT)
- w.pos(n.Pos)
- w.expr(n.Left) // n.Left.Op == OTYPE
- w.selector(n.Right.Sym)
+ w.pos(n.Pos())
+ w.expr(n.Left()) // n.Left.Op == OTYPE
+ w.selector(n.Right().Sym())
case ir.ONAME:
// Package scope name.
case ir.OTYPE:
w.op(ir.OTYPE)
- w.typ(n.Type)
+ w.typ(n.Type())
case ir.OTYPESW:
w.op(ir.OTYPESW)
- w.pos(n.Pos)
+ w.pos(n.Pos())
var s *types.Sym
- if n.Left != nil {
- if n.Left.Op != ir.ONONAME {
- base.Fatalf("expected ONONAME, got %v", n.Left)
+ if n.Left() != nil {
+ if n.Left().Op() != ir.ONONAME {
+ base.Fatalf("expected ONONAME, got %v", n.Left())
}
- s = n.Left.Sym
+ s = n.Left().Sym()
}
w.localIdent(s, 0) // declared pseudo-variable, if any
- w.exprsOrNil(n.Right, nil)
+ w.exprsOrNil(n.Right(), nil)
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
// should have been resolved by typechecking - handled by default case
case ir.OPTRLIT:
w.op(ir.OADDR)
- w.pos(n.Pos)
- w.expr(n.Left)
+ w.pos(n.Pos())
+ w.expr(n.Left())
case ir.OSTRUCTLIT:
w.op(ir.OSTRUCTLIT)
- w.pos(n.Pos)
- w.typ(n.Type)
- w.elemList(n.List) // special handling of field names
+ w.pos(n.Pos())
+ w.typ(n.Type())
+ w.elemList(n.List()) // special handling of field names
case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
w.op(ir.OCOMPLIT)
- w.pos(n.Pos)
- w.typ(n.Type)
- w.exprList(n.List)
+ w.pos(n.Pos())
+ w.typ(n.Type())
+ w.exprList(n.List())
case ir.OKEY:
w.op(ir.OKEY)
- w.pos(n.Pos)
- w.exprsOrNil(n.Left, n.Right)
+ w.pos(n.Pos())
+ w.exprsOrNil(n.Left(), n.Right())
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
case ir.OCALLPART:
// An OCALLPART is an OXDOT before type checking.
w.op(ir.OXDOT)
- w.pos(n.Pos)
- w.expr(n.Left)
+ w.pos(n.Pos())
+ w.expr(n.Left())
// Right node should be ONAME
- w.selector(n.Right.Sym)
+ w.selector(n.Right().Sym())
case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH:
w.op(ir.OXDOT)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.selector(n.Sym)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.selector(n.Sym())
case ir.ODOTTYPE, ir.ODOTTYPE2:
w.op(ir.ODOTTYPE)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.typ(n.Type)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.typ(n.Type())
case ir.OINDEX, ir.OINDEXMAP:
w.op(ir.OINDEX)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.expr(n.Right)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR:
w.op(ir.OSLICE)
- w.pos(n.Pos)
- w.expr(n.Left)
+ w.pos(n.Pos())
+ w.expr(n.Left())
low, high, _ := n.SliceBounds()
w.exprsOrNil(low, high)
case ir.OSLICE3, ir.OSLICE3ARR:
w.op(ir.OSLICE3)
- w.pos(n.Pos)
- w.expr(n.Left)
+ w.pos(n.Pos())
+ w.expr(n.Left())
low, high, max := n.SliceBounds()
w.exprsOrNil(low, high)
w.expr(max)
case ir.OCOPY, ir.OCOMPLEX:
// treated like other builtin calls (see e.g., OREAL)
w.op(op)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.expr(n.Right)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
w.op(ir.OEND)
case ir.OCONV, ir.OCONVIFACE, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR:
w.op(ir.OCONV)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.typ(n.Type)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.typ(n.Type())
case ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
w.op(op)
- w.pos(n.Pos)
- if n.Left != nil {
- w.expr(n.Left)
+ w.pos(n.Pos())
+ if n.Left() != nil {
+ w.expr(n.Left())
w.op(ir.OEND)
} else {
- w.exprList(n.List) // emits terminating OEND
+ w.exprList(n.List()) // emits terminating OEND
}
// only append() calls may contain '...' arguments
if op == ir.OAPPEND {
case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG:
w.op(ir.OCALL)
- w.pos(n.Pos)
- w.stmtList(n.Ninit)
- w.expr(n.Left)
- w.exprList(n.List)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.expr(n.Left())
+ w.exprList(n.List())
w.bool(n.IsDDD())
case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
w.op(op) // must keep separate from OMAKE for importer
- w.pos(n.Pos)
- w.typ(n.Type)
+ w.pos(n.Pos())
+ w.typ(n.Type())
switch {
default:
// empty list
w.op(ir.OEND)
- case n.List.Len() != 0: // pre-typecheck
- w.exprList(n.List) // emits terminating OEND
- case n.Right != nil:
- w.expr(n.Left)
- w.expr(n.Right)
+ case n.List().Len() != 0: // pre-typecheck
+ w.exprList(n.List()) // emits terminating OEND
+ case n.Right() != nil:
+ w.expr(n.Left())
+ w.expr(n.Right())
w.op(ir.OEND)
- case n.Left != nil && (n.Op == ir.OMAKESLICE || !n.Left.Type.IsUntyped()):
- w.expr(n.Left)
+ case n.Left() != nil && (n.Op() == ir.OMAKESLICE || !n.Left().Type().IsUntyped()):
+ w.expr(n.Left())
w.op(ir.OEND)
}
// unary expressions
case ir.OPLUS, ir.ONEG, ir.OADDR, ir.OBITNOT, ir.ODEREF, ir.ONOT, ir.ORECV:
w.op(op)
- w.pos(n.Pos)
- w.expr(n.Left)
+ w.pos(n.Pos())
+ w.expr(n.Left())
// binary expressions
case ir.OADD, ir.OAND, ir.OANDAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.OOROR, ir.ORSH, ir.OSEND, ir.OSUB, ir.OXOR:
w.op(op)
- w.pos(n.Pos)
- w.expr(n.Left)
- w.expr(n.Right)
+ w.pos(n.Pos())
+ w.expr(n.Left())
+ w.expr(n.Right())
case ir.OADDSTR:
w.op(ir.OADDSTR)
- w.pos(n.Pos)
- w.exprList(n.List)
+ w.pos(n.Pos())
+ w.exprList(n.List())
case ir.ODCLCONST:
// if exporting, DCLCONST should just be removed as its usage
// is replaced by the value
default:
base.Fatalf("cannot export %v (%d) node\n"+
- "\t==> please file an issue and assign to gri@", n.Op, int(n.Op))
+ "\t==> please file an issue and assign to gri@", n.Op(), int(n.Op()))
}
}
func (w *exportWriter) elemList(list ir.Nodes) {
w.uint64(uint64(list.Len()))
for _, n := range list.Slice() {
- w.selector(n.Sym)
- w.expr(n.Left)
+ w.selector(n.Sym())
+ w.expr(n.Left())
}
}
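// Aside (editorial, not part of this CL): the shape of the mechanical rewrite
// this patch applies everywhere. Direct field reads such as n.List become the
// accessor n.List(), while in-place mutation goes through PtrList(), which
// hands back a pointer to the node's own list. A toy sketch with a
// hypothetical miniNode (the real ir.Node is far larger):
package main

import "fmt"

type Nodes struct{ slice []string }

func (n *Nodes) Append(s ...string) { n.slice = append(n.slice, s...) }
func (n Nodes) Len() int            { return len(n.slice) }

type miniNode struct{ list Nodes }

// List returns a copy of the header; good enough for Len and iteration.
func (n *miniNode) List() Nodes { return n.list }

// PtrList returns a pointer, so Append and Set mutate the node itself.
func (n *miniNode) PtrList() *Nodes { return &n.list }

func main() {
	var n miniNode
	n.PtrList().Append("a", "b") // was: n.list.Append("a", "b")
	fmt.Println(n.List().Len())  // prints 2
}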
// PPARAM/PPARAMOUT, because we only want to include vargen in
// non-param names.
var v int32
- if n.Class() == ir.PAUTO || (n.Class() == ir.PAUTOHEAP && n.Name.Param.Stackcopy == nil) {
- v = n.Name.Vargen
+ if n.Class() == ir.PAUTO || (n.Class() == ir.PAUTOHEAP && n.Name().Param.Stackcopy == nil) {
+ v = n.Name().Vargen
}
- w.localIdent(n.Sym, v)
+ w.localIdent(n.Sym(), v)
}
func (w *exportWriter) localIdent(s *types.Sym, v int32) {
)
func expandDecl(n *ir.Node) {
- if n.Op != ir.ONONAME {
+ if n.Op() != ir.ONONAME {
return
}
}
func expandInline(fn *ir.Node) {
- if fn.Func.Inl.Body != nil {
+ if fn.Func().Inl.Body != nil {
return
}
}
func importReaderFor(n *ir.Node, importers map[*types.Sym]iimporterAndOffset) *importReader {
- x, ok := importers[n.Sym]
+ x, ok := importers[n.Sym()]
if !ok {
return nil
}
- return x.p.newReader(x.off, n.Sym.Pkg)
+ return x.p.newReader(x.off, n.Sym().Pkg)
}
type intReader struct {
}
func (r *importReader) doDecl(n *ir.Node) {
- if n.Op != ir.ONONAME {
- base.Fatalf("doDecl: unexpected Op for %v: %v", n.Sym, n.Op)
+ if n.Op() != ir.ONONAME {
+ base.Fatalf("doDecl: unexpected Op for %v: %v", n.Sym(), n.Op())
}
tag := r.byte()
case 'A':
typ := r.typ()
- importalias(r.p.ipkg, pos, n.Sym, typ)
+ importalias(r.p.ipkg, pos, n.Sym(), typ)
case 'C':
typ := r.typ()
val := r.value(typ)
- importconst(r.p.ipkg, pos, n.Sym, typ, val)
+ importconst(r.p.ipkg, pos, n.Sym(), typ, val)
case 'F':
typ := r.signature(nil)
- importfunc(r.p.ipkg, pos, n.Sym, typ)
+ importfunc(r.p.ipkg, pos, n.Sym(), typ)
r.funcExt(n)
case 'T':
// Types can be recursive. We need to setup a stub
// declaration before recursing.
- t := importtype(r.p.ipkg, pos, n.Sym)
+ t := importtype(r.p.ipkg, pos, n.Sym())
// We also need to defer width calculations until
// after the underlying type has been assigned.
mtyp := r.signature(recv)
m := newfuncnamel(mpos, methodSym(recv.Type, msym), new(ir.Func))
- m.Type = mtyp
+ m.SetType(mtyp)
m.SetClass(ir.PFUNC)
// methodSym already marked m.Sym as a function.
case 'V':
typ := r.typ()
- importvar(r.p.ipkg, pos, n.Sym, typ)
+ importvar(r.p.ipkg, pos, n.Sym(), typ)
r.varExt(n)
default:
// types. Therefore, this must be a package-scope
// type.
n := ir.AsNode(r.qualifiedIdent().PkgDef())
- if n.Op == ir.ONONAME {
+ if n.Op() == ir.ONONAME {
expandDecl(n)
}
- if n.Op != ir.OTYPE {
- base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op, n.Sym, n)
+ if n.Op() != ir.OTYPE {
+ base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n)
}
- return n.Type
+ return n.Type()
case pointerType:
return types.NewPtr(r.typ())
case sliceType:
// Compiler-specific extensions.
func (r *importReader) varExt(n *ir.Node) {
- r.linkname(n.Sym)
- r.symIdx(n.Sym)
+ r.linkname(n.Sym())
+ r.symIdx(n.Sym())
}
func (r *importReader) funcExt(n *ir.Node) {
- r.linkname(n.Sym)
- r.symIdx(n.Sym)
+ r.linkname(n.Sym())
+ r.symIdx(n.Sym())
// Escape analysis.
for _, fs := range &types.RecvsParams {
- for _, f := range fs(n.Type).FieldSlice() {
+ for _, f := range fs(n.Type()).FieldSlice() {
f.Note = r.string()
}
}
// Inline body.
if u := r.uint64(); u > 0 {
- n.Func.Inl = &ir.Inline{
+ n.Func().Inl = &ir.Inline{
Cost: int32(u - 1),
}
- n.Func.Endlineno = r.pos()
+ n.Func().Endlineno = r.pos()
}
}
var typeSymIdx = make(map[*types.Type][2]int64)
func (r *importReader) doInline(n *ir.Node) {
- if len(n.Func.Inl.Body) != 0 {
+ if len(n.Func().Inl.Body) != 0 {
base.Fatalf("%v already has inline body", n)
}
// functions).
body = []*ir.Node{}
}
- n.Func.Inl.Body = body
+ n.Func().Inl.Body = body
importlist = append(importlist, n)
if base.Flag.E > 0 && base.Flag.LowerM > 2 {
if base.Flag.LowerM > 3 {
- fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type, ir.AsNodes(n.Func.Inl.Body))
+ fmt.Printf("inl body for %v %#v: %+v\n", n, n.Type(), ir.AsNodes(n.Func().Inl.Body))
} else {
- fmt.Printf("inl body for %v %#v: %v\n", n, n.Type, ir.AsNodes(n.Func.Inl.Body))
+ fmt.Printf("inl body for %v %#v: %v\n", n, n.Type(), ir.AsNodes(n.Func().Inl.Body))
}
}
}
break
}
// OBLOCK nodes may be created when importing ODCL nodes - unpack them
- if n.Op == ir.OBLOCK {
- list = append(list, n.List.Slice()...)
+ if n.Op() == ir.OBLOCK {
+ list = append(list, n.List().Slice()...)
} else {
list = append(list, n)
}
}
func (r *importReader) caseList(sw *ir.Node) []*ir.Node {
- namedTypeSwitch := sw.Op == ir.OSWITCH && sw.Left != nil && sw.Left.Op == ir.OTYPESW && sw.Left.Left != nil
+ namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil
cases := make([]*ir.Node, r.uint64())
for i := range cases {
cas := ir.NodAt(r.pos(), ir.OCASE, nil, nil)
- cas.List.Set(r.stmtList())
+ cas.PtrList().Set(r.stmtList())
if namedTypeSwitch {
// Note: per-case variables will have distinct, dotted
// names after import. That's okay: swt.go only needs
// Sym for diagnostics anyway.
- caseVar := ir.NewNameAt(cas.Pos, r.ident())
+ caseVar := ir.NewNameAt(cas.Pos(), r.ident())
declare(caseVar, dclcontext)
- cas.Rlist.Set1(caseVar)
- caseVar.Name.Defn = sw.Left
+ cas.PtrRlist().Set1(caseVar)
+ caseVar.Name().Defn = sw.Left()
}
- cas.Nbody.Set(r.stmtList())
+ cas.PtrBody().Set(r.stmtList())
cases[i] = cas
}
return cases
func (r *importReader) expr() *ir.Node {
n := r.node()
- if n != nil && n.Op == ir.OBLOCK {
+ if n != nil && n.Op() == ir.OBLOCK {
base.Fatalf("unexpected block node: %v", n)
}
return n
n = ir.NewLiteral(r.value(typ))
}
n = npos(pos, n)
- n.Type = typ
+ n.SetType(typ)
return n
case ir.ONONAME:
case ir.OTYPESW:
n := ir.NodAt(r.pos(), ir.OTYPESW, nil, nil)
if s := r.ident(); s != nil {
- n.Left = npos(n.Pos, newnoname(s))
+ n.SetLeft(npos(n.Pos(), newnoname(s)))
}
right, _ := r.exprsOrNil()
- n.Right = right
+ n.SetRight(right)
return n
// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
savedlineno := base.Pos
base.Pos = r.pos()
n := ir.NodAt(base.Pos, ir.OCOMPLIT, nil, typenod(r.typ()))
- n.List.Set(r.elemList()) // special handling of field names
+ n.PtrList().Set(r.elemList()) // special handling of field names
base.Pos = savedlineno
return n
case ir.OCOMPLIT:
n := ir.NodAt(r.pos(), ir.OCOMPLIT, nil, typenod(r.typ()))
- n.List.Set(r.exprList())
+ n.PtrList().Set(r.exprList())
return n
case ir.OKEY:
case ir.ODOTTYPE:
n := ir.NodAt(r.pos(), ir.ODOTTYPE, r.expr(), nil)
- n.Type = r.typ()
+ n.SetType(r.typ())
return n
// case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
n := ir.NodAt(r.pos(), op, r.expr(), nil)
low, high := r.exprsOrNil()
var max *ir.Node
- if n.Op.IsSlice3() {
+ if n.Op().IsSlice3() {
max = r.expr()
}
n.SetSliceBounds(low, high, max)
case ir.OCONV:
n := ir.NodAt(r.pos(), ir.OCONV, r.expr(), nil)
- n.Type = r.typ()
+ n.SetType(r.typ())
return n
case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
n := npos(r.pos(), builtinCall(op))
- n.List.Set(r.exprList())
+ n.PtrList().Set(r.exprList())
if op == ir.OAPPEND {
n.SetIsDDD(r.bool())
}
case ir.OCALL:
n := ir.NodAt(r.pos(), ir.OCALL, nil, nil)
- n.Ninit.Set(r.stmtList())
- n.Left = r.expr()
- n.List.Set(r.exprList())
+ n.PtrInit().Set(r.stmtList())
+ n.SetLeft(r.expr())
+ n.PtrList().Set(r.exprList())
n.SetIsDDD(r.bool())
return n
case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
n := npos(r.pos(), builtinCall(ir.OMAKE))
- n.List.Append(typenod(r.typ()))
- n.List.Append(r.exprList()...)
+ n.PtrList().Append(typenod(r.typ()))
+ n.PtrList().Append(r.exprList()...)
return n
// unary expressions
case ir.OASOP:
n := ir.NodAt(r.pos(), ir.OASOP, nil, nil)
n.SetSubOp(r.op())
- n.Left = r.expr()
+ n.SetLeft(r.expr())
if !r.bool() {
- n.Right = nodintconst(1)
+ n.SetRight(nodintconst(1))
n.SetImplicit(true)
} else {
- n.Right = r.expr()
+ n.SetRight(r.expr())
}
return n
case ir.OAS2:
n := ir.NodAt(r.pos(), ir.OAS2, nil, nil)
- n.List.Set(r.exprList())
- n.Rlist.Set(r.exprList())
+ n.PtrList().Set(r.exprList())
+ n.PtrRlist().Set(r.exprList())
return n
case ir.ORETURN:
n := ir.NodAt(r.pos(), ir.ORETURN, nil, nil)
- n.List.Set(r.exprList())
+ n.PtrList().Set(r.exprList())
return n
// case ORETJMP:
case ir.OIF:
n := ir.NodAt(r.pos(), ir.OIF, nil, nil)
- n.Ninit.Set(r.stmtList())
- n.Left = r.expr()
- n.Nbody.Set(r.stmtList())
- n.Rlist.Set(r.stmtList())
+ n.PtrInit().Set(r.stmtList())
+ n.SetLeft(r.expr())
+ n.PtrBody().Set(r.stmtList())
+ n.PtrRlist().Set(r.stmtList())
return n
case ir.OFOR:
n := ir.NodAt(r.pos(), ir.OFOR, nil, nil)
- n.Ninit.Set(r.stmtList())
+ n.PtrInit().Set(r.stmtList())
left, right := r.exprsOrNil()
- n.Left = left
- n.Right = right
- n.Nbody.Set(r.stmtList())
+ n.SetLeft(left)
+ n.SetRight(right)
+ n.PtrBody().Set(r.stmtList())
return n
case ir.ORANGE:
n := ir.NodAt(r.pos(), ir.ORANGE, nil, nil)
- n.List.Set(r.stmtList())
- n.Right = r.expr()
- n.Nbody.Set(r.stmtList())
+ n.PtrList().Set(r.stmtList())
+ n.SetRight(r.expr())
+ n.PtrBody().Set(r.stmtList())
return n
case ir.OSELECT, ir.OSWITCH:
n := ir.NodAt(r.pos(), op, nil, nil)
- n.Ninit.Set(r.stmtList())
+ n.PtrInit().Set(r.stmtList())
left, _ := r.exprsOrNil()
- n.Left = left
- n.List.Set(r.caseList(n))
+ n.SetLeft(left)
+ n.PtrList().Set(r.caseList(n))
return n
// case OCASE:
pos := r.pos()
left, _ := r.exprsOrNil()
if left != nil {
- left = NewName(left.Sym)
+ left = NewName(left.Sym())
}
return ir.NodAt(pos, op, left, nil)
case ir.OGOTO, ir.OLABEL:
n := ir.NodAt(r.pos(), op, nil, nil)
- n.Sym = lookup(r.string())
+ n.SetSym(lookup(r.string()))
return n
case ir.OEND:
// Make a function that contains all the initialization statements.
if len(nf) > 0 {
- base.Pos = nf[0].Pos // prolog/epilog gets line number of first init stmt
+ base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt
initializers := lookup("init")
fn := dclfunc(initializers, ir.Nod(ir.OTFUNC, nil, nil))
- for _, dcl := range initTodo.Func.Dcl {
- dcl.Name.Curfn = fn
+ for _, dcl := range initTodo.Func().Dcl {
+ dcl.Name().Curfn = fn
}
- fn.Func.Dcl = append(fn.Func.Dcl, initTodo.Func.Dcl...)
- initTodo.Func.Dcl = nil
+ fn.Func().Dcl = append(fn.Func().Dcl, initTodo.Func().Dcl...)
+ initTodo.Func().Dcl = nil
- fn.Nbody.Set(nf)
+ fn.PtrBody().Set(nf)
funcbody()
fn = typecheck(fn, ctxStmt)
xtop = append(xtop, fn)
fns = append(fns, initializers.Linksym())
}
- if initTodo.Func.Dcl != nil {
+ if initTodo.Func().Dcl != nil {
// We only generate temps using initTodo if there
// are package-scope initialization statements, so
// something's weird if we get here.
// Record user init functions.
for i := 0; i < renameinitgen; i++ {
s := lookupN("init.", i)
- fn := ir.AsNode(s.Def).Name.Defn
+ fn := ir.AsNode(s.Def).Name().Defn
// Skip init functions with empty bodies.
- if fn.Nbody.Len() == 1 && fn.Nbody.First().Op == ir.OEMPTY {
+ if fn.Body().Len() == 1 && fn.Body().First().Op() == ir.OEMPTY {
continue
}
fns = append(fns, s.Linksym())
// Make an .inittask structure.
sym := lookup(".inittask")
nn := NewName(sym)
- nn.Type = types.Types[types.TUINT8] // fake type
+ nn.SetType(types.Types[types.TUINT8]) // fake type
nn.SetClass(ir.PEXTERN)
sym.Def = ir.AsTypesNode(nn)
exportsym(nn)
// Process all package-level assignment in declaration order.
for _, n := range l {
- switch n.Op {
+ switch n.Op() {
case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
o.processAssign(n)
o.flushReady(s.staticInit)
// Check that all assignments are now Done; if not, there must
// have been a dependency cycle.
for _, n := range l {
- switch n.Op {
+ switch n.Op() {
case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
if n.Initorder() != InitDone {
// If there have already been errors
}
func (o *InitOrder) processAssign(n *ir.Node) {
- if n.Initorder() != InitNotStarted || n.Xoffset != types.BADWIDTH {
- base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
+ if n.Initorder() != InitNotStarted || n.Offset() != types.BADWIDTH {
+ base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
}
n.SetInitorder(InitPending)
- n.Xoffset = 0
+ n.SetOffset(0)
// Compute number of variable dependencies and build the
// inverse dependency ("blocking") graph.
for dep := range collectDeps(n, true) {
- defn := dep.Name.Defn
+ defn := dep.Name().Defn
// Skip dependencies on functions (PFUNC) and
// variables already initialized (InitDone).
if dep.Class() != ir.PEXTERN || defn.Initorder() == InitDone {
continue
}
- n.Xoffset = n.Xoffset + 1
+ n.SetOffset(n.Offset() + 1)
o.blocking[defn] = append(o.blocking[defn], n)
}
- if n.Xoffset == 0 {
+ if n.Offset() == 0 {
heap.Push(&o.ready, n)
}
}
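// Aside (editorial, not part of this CL): processAssign and flushReady reuse
// the node's Offset field as a countdown of unresolved dependencies. The same
// scheme with an explicit counter map, as a runnable sketch (all names here
// are hypothetical):
package main

import "fmt"

func initOrder(deps map[string][]string, all []string) []string {
	pending := make(map[string]int)       // analogous to n.Offset()
	blocking := make(map[string][]string) // inverse dependency graph
	var ready, order []string
	for _, n := range all {
		for _, d := range deps[n] {
			pending[n]++
			blocking[d] = append(blocking[d], n)
		}
		if pending[n] == 0 {
			ready = append(ready, n)
		}
	}
	for len(ready) > 0 {
		n := ready[0]
		ready = ready[1:]
		order = append(order, n) // plays the role of initialize(n)
		for _, m := range blocking[n] {
			if pending[m]--; pending[m] == 0 {
				ready = append(ready, m)
			}
		}
	}
	return order
}

func main() {
	deps := map[string][]string{"c": {"b"}, "b": {"a"}}
	fmt.Println(initOrder(deps, []string{"a", "b", "c"})) // [a b c]
}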
func (o *InitOrder) flushReady(initialize func(*ir.Node)) {
for o.ready.Len() != 0 {
n := heap.Pop(&o.ready).(*ir.Node)
- if n.Initorder() != InitPending || n.Xoffset != 0 {
- base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Xoffset)
+ if n.Initorder() != InitPending || n.Offset() != 0 {
+ base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
}
initialize(n)
n.SetInitorder(InitDone)
- n.Xoffset = types.BADWIDTH
+ n.SetOffset(types.BADWIDTH)
blocked := o.blocking[n]
delete(o.blocking, n)
for _, m := range blocked {
- m.Xoffset = m.Xoffset - 1
- if m.Xoffset == 0 {
+ m.SetOffset(m.Offset() - 1)
+ if m.Offset() == 0 {
heap.Push(&o.ready, m)
}
}
// There might be multiple loops involving n; by sorting
// references, we deterministically pick the one reported.
- refers := collectDeps(n.Name.Defn, false).Sorted(func(ni, nj *ir.Node) bool {
- return ni.Pos.Before(nj.Pos)
+ refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj *ir.Node) bool {
+ return ni.Pos().Before(nj.Pos())
})
*path = append(*path, n)
for _, ref := range refers {
// Short-circuit variables that were initialized.
- if ref.Class() == ir.PEXTERN && ref.Name.Defn.Initorder() == InitDone {
+ if ref.Class() == ir.PEXTERN && ref.Name().Defn.Initorder() == InitDone {
continue
}
// Rotate the loop so that the earliest variable declaration is at
// the start.
i := -1
for j, n := range l {
- if n.Class() == ir.PEXTERN && (i == -1 || n.Pos.Before(l[i].Pos)) {
+ if n.Class() == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
i = j
}
}
}
fmt.Fprintf(&msg, "\t%v: %v", ir.Line(l[0]), l[0])
- base.ErrorfAt(l[0].Pos, msg.String())
+ base.ErrorfAt(l[0].Pos(), msg.String())
base.ErrorExit()
}
// upon functions (but not variables).
func collectDeps(n *ir.Node, transitive bool) ir.NodeSet {
d := initDeps{transitive: transitive}
- switch n.Op {
+ switch n.Op() {
case ir.OAS:
- d.inspect(n.Right)
+ d.inspect(n.Right())
case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
- d.inspect(n.Right)
+ d.inspect(n.Right())
case ir.ODCLFUNC:
- d.inspectList(n.Nbody)
+ d.inspectList(n.Body())
default:
- base.Fatalf("unexpected Op: %v", n.Op)
+ base.Fatalf("unexpected Op: %v", n.Op())
}
return d.seen
}
// visit calls foundDep on any package-level functions or variables
// referenced by n, if any.
func (d *initDeps) visit(n *ir.Node) bool {
- switch n.Op {
+ switch n.Op() {
case ir.OMETHEXPR:
d.foundDep(methodExprName(n))
return false
}
case ir.OCLOSURE:
- d.inspectList(n.Func.Decl.Nbody)
+ d.inspectList(n.Func().Decl.Body())
case ir.ODOTMETH, ir.OCALLPART:
d.foundDep(methodExprName(n))
// Names without definitions aren't interesting as far as
// initialization ordering goes.
- if n.Name.Defn == nil {
+ if n.Name().Defn == nil {
return
}
}
d.seen.Add(n)
if d.transitive && n.Class() == ir.PFUNC {
- d.inspectList(n.Name.Defn.Nbody)
+ d.inspectList(n.Name().Defn.Body())
}
}
// but both OAS nodes use the "=" token's position as their Pos.
type declOrder []*ir.Node
-func (s declOrder) Len() int { return len(s) }
-func (s declOrder) Less(i, j int) bool { return firstLHS(s[i]).Pos.Before(firstLHS(s[j]).Pos) }
-func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s declOrder) Len() int { return len(s) }
+func (s declOrder) Less(i, j int) bool {
+ return firstLHS(s[i]).Pos().Before(firstLHS(s[j]).Pos())
+}
+func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*ir.Node)) }
func (s *declOrder) Pop() interface{} {
// firstLHS returns the first expression on the left-hand side of
// assignment n.
func firstLHS(n *ir.Node) *ir.Node {
- switch n.Op {
+ switch n.Op() {
case ir.OAS:
- return n.Left
+ return n.Left()
case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR:
- return n.List.First()
+ return n.List().First()
}
- base.Fatalf("unexpected Op: %v", n.Op)
+ base.Fatalf("unexpected Op: %v", n.Op())
return nil
}
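// Aside (editorial, not part of this CL): declOrder implements heap.Interface
// so that pending assignments pop in source-position order. The same pattern,
// runnable, with ints standing in for positions:
package main

import (
	"container/heap"
	"fmt"
)

type byPos []int

func (s byPos) Len() int            { return len(s) }
func (s byPos) Less(i, j int) bool  { return s[i] < s[j] }
func (s byPos) Swap(i, j int)       { s[i], s[j] = s[j], s[i] }
func (s *byPos) Push(x interface{}) { *s = append(*s, x.(int)) }
func (s *byPos) Pop() interface{} {
	old := *s
	x := old[len(old)-1]
	*s = old[:len(old)-1]
	return x
}

func main() {
	h := &byPos{30, 10, 20}
	heap.Init(h)
	for h.Len() > 0 {
		fmt.Print(heap.Pop(h), " ") // 10 20 30
	}
}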
func fnpkg(fn *ir.Node) *types.Pkg {
if ir.IsMethod(fn) {
// method
- rcvr := fn.Type.Recv().Type
+ rcvr := fn.Type().Recv().Type
if rcvr.IsPtr() {
rcvr = rcvr.Elem()
}
if rcvr.Sym == nil {
- base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym, fn, rcvr)
+ base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym(), fn, rcvr)
}
return rcvr.Sym.Pkg
}
// non-method
- return fn.Sym.Pkg
+ return fn.Sym().Pkg
}
// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// because they're a copy of an already checked body.
}
if base.Flag.LowerM > 2 || base.Debug.Export != 0 {
- fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, ir.AsNodes(fn.Func.Inl.Body))
+ fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym(), fn, ir.AsNodes(fn.Func().Inl.Body))
}
savefn := Curfn
Curfn = fn
- typecheckslice(fn.Func.Inl.Body, ctxStmt)
+ typecheckslice(fn.Func().Inl.Body, ctxStmt)
Curfn = savefn
// During expandInline (which imports fn.Func.Inl.Body),
// declarations are added to fn.Func.Dcl by funcHdr(). Move them
// to fn.Func.Inl.Dcl for consistency with how local functions
// behave. (Append because typecheckinl may be called multiple
// times.)
- fn.Func.Inl.Dcl = append(fn.Func.Inl.Dcl, fn.Func.Dcl...)
- fn.Func.Dcl = nil
+ fn.Func().Inl.Dcl = append(fn.Func().Inl.Dcl, fn.Func().Dcl...)
+ fn.Func().Dcl = nil
base.Pos = lno
}
// Caninl determines whether fn is inlineable.
// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
// fn and ->nbody will already have been typechecked.
func caninl(fn *ir.Node) {
- if fn.Op != ir.ODCLFUNC {
+ if fn.Op() != ir.ODCLFUNC {
base.Fatalf("caninl %v", fn)
}
- if fn.Func.Nname == nil {
+ if fn.Func().Nname == nil {
base.Fatalf("caninl no nname %+v", fn)
}
defer func() {
if reason != "" {
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Func.Nname, reason)
+ fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Func().Nname, reason)
}
if logopt.Enabled() {
- logopt.LogOpt(fn.Pos, "cannotInlineFunction", "inline", ir.FuncName(fn), reason)
+ logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason)
}
}
}()
}
// If marked "go:noinline", don't inline
- if fn.Func.Pragma&ir.Noinline != 0 {
+ if fn.Func().Pragma&ir.Noinline != 0 {
reason = "marked go:noinline"
return
}
// If marked "go:norace" and -race compilation, don't inline.
- if base.Flag.Race && fn.Func.Pragma&ir.Norace != 0 {
+ if base.Flag.Race && fn.Func().Pragma&ir.Norace != 0 {
reason = "marked go:norace with -race compilation"
return
}
// If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
- if base.Debug.Checkptr != 0 && fn.Func.Pragma&ir.NoCheckPtr != 0 {
+ if base.Debug.Checkptr != 0 && fn.Func().Pragma&ir.NoCheckPtr != 0 {
reason = "marked go:nocheckptr"
return
}
// If marked "go:cgo_unsafe_args", don't inline, since the
// function makes assumptions about its argument frame layout.
- if fn.Func.Pragma&ir.CgoUnsafeArgs != 0 {
+ if fn.Func().Pragma&ir.CgoUnsafeArgs != 0 {
reason = "marked go:cgo_unsafe_args"
return
}
// If marked as "go:uintptrescapes", don't inline, since the
// escape information is lost during inlining.
- if fn.Func.Pragma&ir.UintptrEscapes != 0 {
+ if fn.Func().Pragma&ir.UintptrEscapes != 0 {
reason = "marked as having an escaping uintptr argument"
return
}
// granularity, so inlining yeswritebarrierrec functions can
// confuse it (#22342). As a workaround, disallow inlining
// them for now.
- if fn.Func.Pragma&ir.Yeswritebarrierrec != 0 {
+ if fn.Func().Pragma&ir.Yeswritebarrierrec != 0 {
reason = "marked go:yeswritebarrierrec"
return
}
// If fn has no body (is defined outside of Go), cannot inline it.
- if fn.Nbody.Len() == 0 {
+ if fn.Body().Len() == 0 {
reason = "no function body"
return
}
base.Fatalf("caninl on non-typechecked function %v", fn)
}
- n := fn.Func.Nname
- if n.Func.InlinabilityChecked() {
+ n := fn.Func().Nname
+ if n.Func().InlinabilityChecked() {
return
}
- defer n.Func.SetInlinabilityChecked(true)
+ defer n.Func().SetInlinabilityChecked(true)
cc := int32(inlineExtraCallCost)
if base.Flag.LowerL == 4 {
extraCallCost: cc,
usedLocals: make(map[*ir.Node]bool),
}
- if visitor.visitList(fn.Nbody) {
+ if visitor.visitList(fn.Body()) {
reason = visitor.reason
return
}
return
}
- n.Func.Inl = &ir.Inline{
+ n.Func().Inl = &ir.Inline{
Cost: inlineMaxBudget - visitor.budget,
- Dcl: inlcopylist(pruneUnusedAutos(n.Name.Defn.Func.Dcl, &visitor)),
- Body: inlcopylist(fn.Nbody.Slice()),
+ Dcl: inlcopylist(pruneUnusedAutos(n.Name().Defn.Func().Dcl, &visitor)),
+ Body: inlcopylist(fn.Body().Slice()),
}
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type, ir.AsNodes(n.Func.Inl.Body))
+ fmt.Printf("%v: can inline %#v with cost %d as: %#v { %#v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.AsNodes(n.Func().Inl.Body))
} else if base.Flag.LowerM != 0 {
fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
}
if logopt.Enabled() {
- logopt.LogOpt(fn.Pos, "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget))
+ logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget))
}
}
if n == nil {
return
}
- if n.Op != ir.ONAME || n.Class() != ir.PFUNC {
- base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op, n.Class())
+ if n.Op() != ir.ONAME || n.Class() != ir.PFUNC {
+ base.Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op(), n.Class())
}
- if n.Func == nil {
+ if n.Func() == nil {
base.Fatalf("inlFlood: missing Func on %v", n)
}
- if n.Func.Inl == nil {
+ if n.Func().Inl == nil {
return
}
- if n.Func.ExportInline() {
+ if n.Func().ExportInline() {
return
}
- n.Func.SetExportInline(true)
+ n.Func().SetExportInline(true)
typecheckinl(n)
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
- ir.InspectList(ir.AsNodes(n.Func.Inl.Body), func(n *ir.Node) bool {
- switch n.Op {
+ ir.InspectList(ir.AsNodes(n.Func().Inl.Body), func(n *ir.Node) bool {
+ switch n.Op() {
case ir.OMETHEXPR:
inlFlood(methodExprName(n))
return false
}
- switch n.Op {
+ switch n.Op() {
// Call is okay if inlinable and we have the budget for the body.
case ir.OCALLFUNC:
// Functions that call runtime.getcaller{pc,sp} cannot be inlined
// because getcaller{pc,sp} expect a pointer to the caller's first argument.
//
// runtime.throw is a "cheap call" like panic in normal code.
- if n.Left.Op == ir.ONAME && n.Left.Class() == ir.PFUNC && isRuntimePkg(n.Left.Sym.Pkg) {
- fn := n.Left.Sym.Name
+ if n.Left().Op() == ir.ONAME && n.Left().Class() == ir.PFUNC && isRuntimePkg(n.Left().Sym().Pkg) {
+ fn := n.Left().Sym().Name
if fn == "getcallerpc" || fn == "getcallersp" {
v.reason = "call to " + fn
return true
break
}
- if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
- v.budget -= fn.Func.Inl.Cost
+ if fn := inlCallee(n.Left()); fn != nil && fn.Func().Inl != nil {
+ v.budget -= fn.Func().Inl.Cost
break
}
// Call is okay if inlinable and we have the budget for the body.
case ir.OCALLMETH:
- t := n.Left.Type
+ t := n.Left().Type()
if t == nil {
- base.Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
+ base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left())
}
- if isRuntimePkg(n.Left.Sym.Pkg) {
- fn := n.Left.Sym.Name
+ if isRuntimePkg(n.Left().Sym().Pkg) {
+ fn := n.Left().Sym().Name
if fn == "heapBits.nextArena" {
// Special case: explicitly allow
// mid-stack inlining of
// runtime.heapBits.next even though
// it calls slow-path
// runtime.heapBits.nextArena.
break
}
}
- if inlfn := methodExprName(n.Left).Func; inlfn.Inl != nil {
+ if inlfn := methodExprName(n.Left()).Func(); inlfn.Inl != nil {
v.budget -= inlfn.Inl.Cost
break
}
ir.ODEFER,
ir.ODCLTYPE, // can't print yet
ir.ORETJMP:
- v.reason = "unhandled op " + n.Op.String()
+ v.reason = "unhandled op " + n.Op().String()
return true
case ir.OAPPEND:
}
case ir.OBREAK, ir.OCONTINUE:
- if n.Sym != nil {
+ if n.Sym() != nil {
// Should have short-circuited due to labeledControl above.
base.Fatalf("unexpected labeled break/continue: %v", n)
}
case ir.OIF:
- if ir.IsConst(n.Left, constant.Bool) {
+ if ir.IsConst(n.Left(), constant.Bool) {
// This if and the condition cost nothing.
- return v.visitList(n.Ninit) || v.visitList(n.Nbody) ||
- v.visitList(n.Rlist)
+ return v.visitList(n.Init()) || v.visitList(n.Body()) ||
+ v.visitList(n.Rlist())
}
case ir.ONAME:
return true
}
- return v.visit(n.Left) || v.visit(n.Right) ||
- v.visitList(n.List) || v.visitList(n.Rlist) ||
- v.visitList(n.Ninit) || v.visitList(n.Nbody)
+ return v.visit(n.Left()) || v.visit(n.Right()) ||
+ v.visitList(n.List()) || v.visitList(n.Rlist()) ||
+ v.visitList(n.Init()) || v.visitList(n.Body())
}
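// Aside (editorial, not part of this CL): the control pattern of hairyVisitor.
// Every visited node is charged against a budget, and the walk bails out with
// "too hairy" as soon as the budget goes negative. A self-contained sketch
// over a hypothetical tree type:
package main

import "fmt"

type tree struct {
	kids []*tree
}

type budgetVisitor struct{ budget int32 }

func (v *budgetVisitor) tooHairy(n *tree) bool {
	if n == nil {
		return false
	}
	v.budget--
	if v.budget < 0 {
		return true // stop immediately; no point visiting the rest
	}
	for _, k := range n.kids {
		if v.tooHairy(k) {
			return true
		}
	}
	return false
}

func main() {
	n := &tree{kids: []*tree{{}, {}, {}}}
	v := &budgetVisitor{budget: 2}
	fmt.Println(v.tooHairy(n)) // true: 4 nodes exceed a budget of 2
}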
// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
// the body and dcls of an inlineable function.
return nil
}
- switch n.Op {
+ switch n.Op() {
case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL:
return n
}
m := ir.Copy(n)
- if n.Op != ir.OCALLPART && m.Func != nil {
+ if n.Op() != ir.OCALLPART && m.Func() != nil {
base.Fatalf("unexpected Func: %v", m)
}
- m.Left = inlcopy(n.Left)
- m.Right = inlcopy(n.Right)
- m.List.Set(inlcopylist(n.List.Slice()))
- m.Rlist.Set(inlcopylist(n.Rlist.Slice()))
- m.Ninit.Set(inlcopylist(n.Ninit.Slice()))
- m.Nbody.Set(inlcopylist(n.Nbody.Slice()))
+ m.SetLeft(inlcopy(n.Left()))
+ m.SetRight(inlcopy(n.Right()))
+ m.PtrList().Set(inlcopylist(n.List().Slice()))
+ m.PtrRlist().Set(inlcopylist(n.Rlist().Slice()))
+ m.PtrInit().Set(inlcopylist(n.Init().Slice()))
+ m.PtrBody().Set(inlcopylist(n.Body().Slice()))
return m
}
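// Aside (editorial, not part of this CL): inlcopy's structure — copy the node,
// then recursively copy each child slot, sharing leaf kinds like ONAME, OTYPE,
// and OLITERAL — is an ordinary deep-copy-with-exceptions walk. A toy version
// over a hypothetical node type:
package main

import "fmt"

type node struct {
	op          string
	left, right *node
}

// deepCopy shares "leaf" nodes (like the ONAME/OTYPE/OLITERAL cases above)
// and clones everything else.
func deepCopy(n *node) *node {
	if n == nil {
		return nil
	}
	if n.op == "leaf" {
		return n
	}
	m := *n
	m.left = deepCopy(n.left)
	m.right = deepCopy(n.right)
	return &m
}

func main() {
	leaf := &node{op: "leaf"}
	t := &node{op: "add", left: leaf, right: &node{op: "leaf"}}
	c := deepCopy(t)
	fmt.Println(c.left == leaf, c != t) // true true: leaves shared, interior cloned
}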
return 0
}
cnt := 1
- cnt += countNodes(n.Left)
- cnt += countNodes(n.Right)
- for _, n1 := range n.Ninit.Slice() {
+ cnt += countNodes(n.Left())
+ cnt += countNodes(n.Right())
+ for _, n1 := range n.Init().Slice() {
cnt += countNodes(n1)
}
- for _, n1 := range n.Nbody.Slice() {
+ for _, n1 := range n.Body().Slice() {
cnt += countNodes(n1)
}
- for _, n1 := range n.List.Slice() {
+ for _, n1 := range n.List().Slice() {
cnt += countNodes(n1)
}
- for _, n1 := range n.Rlist.Slice() {
+ for _, n1 := range n.Rlist().Slice() {
cnt += countNodes(n1)
}
return cnt
// Turn an OINLCALL into a statement.
func inlconv2stmt(n *ir.Node) {
- n.Op = ir.OBLOCK
+ n.SetOp(ir.OBLOCK)
// n->ninit stays
- n.List.Set(n.Nbody.Slice())
+ n.PtrList().Set(n.Body().Slice())
- n.Nbody.Set(nil)
- n.Rlist.Set(nil)
+ n.PtrBody().Set(nil)
+ n.PtrRlist().Set(nil)
}
// Turn an OINLCALL into a single valued expression.
// The result of inlconv2expr MUST be assigned back to n, e.g.
// n.Left = inlconv2expr(n.Left)
func inlconv2expr(n *ir.Node) *ir.Node {
- r := n.Rlist.First()
- return addinit(r, append(n.Ninit.Slice(), n.Nbody.Slice()...))
+ r := n.Rlist().First()
+ return addinit(r, append(n.Init().Slice(), n.Body().Slice()...))
}
// Turn the rlist (with the return values) of the OINLCALL in
// n into an expression list lumping the ninit and body
// containing the inlined statements on the first list element so
// order will be preserved. Used in return, oas2func and call
// statements.
func inlconv2list(n *ir.Node) []*ir.Node {
- if n.Op != ir.OINLCALL || n.Rlist.Len() == 0 {
+ if n.Op() != ir.OINLCALL || n.Rlist().Len() == 0 {
base.Fatalf("inlconv2list %+v\n", n)
}
- s := n.Rlist.Slice()
- s[0] = addinit(s[0], append(n.Ninit.Slice(), n.Nbody.Slice()...))
+ s := n.Rlist().Slice()
+ s[0] = addinit(s[0], append(n.Init().Slice(), n.Body().Slice()...))
return s
}
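// Aside (editorial, not part of this CL): what the inlconv2* helpers unpack,
// in source terms. An OINLCALL carries init statements, an inlined body, and
// result temporaries; hand-expanding "r := square(a)" under that scheme gives
// roughly the following (hypothetical temp names):
package main

import "fmt"

func main() {
	a := 3
	// Inlined form of r := square(a), where square(x int) int { return x * x }.
	var x int  // parameter temp (inlvar)
	var r0 int // result temp (retvar, "~R0")
	x = a
	r0 = x * x
	r := r0
	fmt.Println(r) // prints 9
}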
return n
}
- switch n.Op {
+ switch n.Op() {
case ir.ODEFER, ir.OGO:
- switch n.Left.Op {
+ switch n.Left().Op() {
case ir.OCALLFUNC, ir.OCALLMETH:
- n.Left.SetNoInline(true)
+ n.Left().SetNoInline(true)
}
// TODO do them here (or earlier),
// so escape analysis can avoid more heapmoves.
case ir.OCALLMETH:
// Prevent inlining some reflect.Value methods when using checkptr,
// even when package reflect was compiled without it (#35073).
- if s := n.Left.Sym; base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
+ if s := n.Left().Sym(); base.Debug.Checkptr != 0 && isReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
return n
}
}
lno := setlineno(n)
- inlnodelist(n.Ninit, maxCost, inlMap)
- for _, n1 := range n.Ninit.Slice() {
- if n1.Op == ir.OINLCALL {
+ inlnodelist(n.Init(), maxCost, inlMap)
+ for _, n1 := range n.Init().Slice() {
+ if n1.Op() == ir.OINLCALL {
inlconv2stmt(n1)
}
}
- n.Left = inlnode(n.Left, maxCost, inlMap)
- if n.Left != nil && n.Left.Op == ir.OINLCALL {
- n.Left = inlconv2expr(n.Left)
+ n.SetLeft(inlnode(n.Left(), maxCost, inlMap))
+ if n.Left() != nil && n.Left().Op() == ir.OINLCALL {
+ n.SetLeft(inlconv2expr(n.Left()))
}
- n.Right = inlnode(n.Right, maxCost, inlMap)
- if n.Right != nil && n.Right.Op == ir.OINLCALL {
- if n.Op == ir.OFOR || n.Op == ir.OFORUNTIL {
- inlconv2stmt(n.Right)
- } else if n.Op == ir.OAS2FUNC {
- n.Rlist.Set(inlconv2list(n.Right))
- n.Right = nil
- n.Op = ir.OAS2
+ n.SetRight(inlnode(n.Right(), maxCost, inlMap))
+ if n.Right() != nil && n.Right().Op() == ir.OINLCALL {
+ if n.Op() == ir.OFOR || n.Op() == ir.OFORUNTIL {
+ inlconv2stmt(n.Right())
+ } else if n.Op() == ir.OAS2FUNC {
+ n.PtrRlist().Set(inlconv2list(n.Right()))
+ n.SetRight(nil)
+ n.SetOp(ir.OAS2)
n.SetTypecheck(0)
n = typecheck(n, ctxStmt)
} else {
- n.Right = inlconv2expr(n.Right)
+ n.SetRight(inlconv2expr(n.Right()))
}
}
- inlnodelist(n.List, maxCost, inlMap)
- if n.Op == ir.OBLOCK {
- for _, n2 := range n.List.Slice() {
- if n2.Op == ir.OINLCALL {
+ inlnodelist(n.List(), maxCost, inlMap)
+ if n.Op() == ir.OBLOCK {
+ for _, n2 := range n.List().Slice() {
+ if n2.Op() == ir.OINLCALL {
inlconv2stmt(n2)
}
}
} else {
- s := n.List.Slice()
+ s := n.List().Slice()
for i1, n1 := range s {
- if n1 != nil && n1.Op == ir.OINLCALL {
+ if n1 != nil && n1.Op() == ir.OINLCALL {
s[i1] = inlconv2expr(s[i1])
}
}
}
- inlnodelist(n.Rlist, maxCost, inlMap)
- s := n.Rlist.Slice()
+ inlnodelist(n.Rlist(), maxCost, inlMap)
+ s := n.Rlist().Slice()
for i1, n1 := range s {
- if n1.Op == ir.OINLCALL {
- if n.Op == ir.OIF {
+ if n1.Op() == ir.OINLCALL {
+ if n.Op() == ir.OIF {
inlconv2stmt(n1)
} else {
s[i1] = inlconv2expr(s[i1])
}
}
- inlnodelist(n.Nbody, maxCost, inlMap)
- for _, n := range n.Nbody.Slice() {
- if n.Op == ir.OINLCALL {
+ inlnodelist(n.Body(), maxCost, inlMap)
+ for _, n := range n.Body().Slice() {
+ if n.Op() == ir.OINLCALL {
inlconv2stmt(n)
}
}
// with all the branches out of the way, it is now time to
// transmogrify this node itself unless inhibited by the
// switch at the top of this function.
- switch n.Op {
+ switch n.Op() {
case ir.OCALLFUNC, ir.OCALLMETH:
if n.NoInline() {
return n
}
}
- switch n.Op {
+ switch n.Op() {
case ir.OCALLFUNC:
if base.Flag.LowerM > 3 {
- fmt.Printf("%v:call to func %+v\n", ir.Line(n), n.Left)
+ fmt.Printf("%v:call to func %+v\n", ir.Line(n), n.Left())
}
if isIntrinsicCall(n) {
break
}
- if fn := inlCallee(n.Left); fn != nil && fn.Func.Inl != nil {
+ if fn := inlCallee(n.Left()); fn != nil && fn.Func().Inl != nil {
n = mkinlcall(n, fn, maxCost, inlMap)
}
case ir.OCALLMETH:
if base.Flag.LowerM > 3 {
- fmt.Printf("%v:call to meth %L\n", ir.Line(n), n.Left.Right)
+ fmt.Printf("%v:call to meth %L\n", ir.Line(n), n.Left().Right())
}
// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
- if n.Left.Type == nil {
- base.Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
+ if n.Left().Type() == nil {
+ base.Fatalf("no function type for [%p] %+v\n", n.Left(), n.Left())
}
- n = mkinlcall(n, methodExprName(n.Left), maxCost, inlMap)
+ n = mkinlcall(n, methodExprName(n.Left()), maxCost, inlMap)
}
base.Pos = lno
func inlCallee(fn *ir.Node) *ir.Node {
fn = staticValue(fn)
switch {
- case fn.Op == ir.OMETHEXPR:
+ case fn.Op() == ir.OMETHEXPR:
n := methodExprName(fn)
// Check that receiver type matches fn.Left.
// TODO(mdempsky): Handle implicit dereference
// of pointer receiver argument?
- if n == nil || !types.Identical(n.Type.Recv().Type, fn.Left.Type) {
+ if n == nil || !types.Identical(n.Type().Recv().Type, fn.Left().Type()) {
return nil
}
return n
- case fn.Op == ir.ONAME && fn.Class() == ir.PFUNC:
+ case fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC:
return fn
- case fn.Op == ir.OCLOSURE:
- c := fn.Func.Decl
+ case fn.Op() == ir.OCLOSURE:
+ c := fn.Func().Decl
caninl(c)
- return c.Func.Nname
+ return c.Func().Nname
}
return nil
}
func staticValue(n *ir.Node) *ir.Node {
for {
- if n.Op == ir.OCONVNOP {
- n = n.Left
+ if n.Op() == ir.OCONVNOP {
+ n = n.Left()
continue
}
// that is initialized and never reassigned, staticValue1 returns the initializer
// expression. Otherwise, it returns nil.
func staticValue1(n *ir.Node) *ir.Node {
- if n.Op != ir.ONAME || n.Class() != ir.PAUTO || n.Name.Addrtaken() {
+ if n.Op() != ir.ONAME || n.Class() != ir.PAUTO || n.Name().Addrtaken() {
return nil
}
- defn := n.Name.Defn
+ defn := n.Name().Defn
if defn == nil {
return nil
}
var rhs *ir.Node
FindRHS:
- switch defn.Op {
+ switch defn.Op() {
case ir.OAS:
- rhs = defn.Right
+ rhs = defn.Right()
case ir.OAS2:
- for i, lhs := range defn.List.Slice() {
+ for i, lhs := range defn.List().Slice() {
if lhs == n {
- rhs = defn.Rlist.Index(i)
+ rhs = defn.Rlist().Index(i)
break FindRHS
}
}
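// Aside (editorial, not part of this CL): the source pattern staticValue
// resolves. A local that is assigned exactly once and never address-taken can
// be chased back to its defining expression, letting the inliner treat a call
// through it as a direct call. Runnable sketch:
package main

import "fmt"

func main() {
	f := func(x int) int { return x + 1 } // defn: an OAS with an OCLOSURE on the right
	g := f                                // g is assigned once and never reassigned
	// staticValue(g) would walk g -> f -> the closure literal, so g(10)
	// can be inlined as if it were a direct call.
	fmt.Println(g(10)) // prints 11
}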
// NB: global variables are always considered to be re-assigned.
// TODO: handle initial declaration not including an assignment and followed by a single assignment?
func reassigned(n *ir.Node) (bool, *ir.Node) {
- if n.Op != ir.ONAME {
+ if n.Op() != ir.ONAME {
base.Fatalf("reassigned %v", n)
}
// no way to reliably check for no-reassignment of globals, assume it can be
- if n.Name.Curfn == nil {
+ if n.Name().Curfn == nil {
return true, nil
}
- f := n.Name.Curfn
+ f := n.Name().Curfn
// There just might be a good reason for this although this can be pretty surprising:
// local variables inside a closure have Curfn pointing to the OCLOSURE node instead
// of the corresponding ODCLFUNC.
// We need to walk the function body to check for reassignments so we follow the
// linkage to the ODCLFUNC node as that is where body is held.
- if f.Op == ir.OCLOSURE {
- f = f.Func.Decl
+ if f.Op() == ir.OCLOSURE {
+ f = f.Func().Decl
}
v := reassignVisitor{name: n}
- a := v.visitList(f.Nbody)
+ a := v.visitList(f.Body())
return a != nil, a
}
if n == nil {
return nil
}
- switch n.Op {
+ switch n.Op() {
case ir.OAS:
- if n.Left == v.name && n != v.name.Name.Defn {
+ if n.Left() == v.name && n != v.name.Name().Defn {
return n
}
case ir.OAS2, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2DOTTYPE:
- for _, p := range n.List.Slice() {
- if p == v.name && n != v.name.Name.Defn {
+ for _, p := range n.List().Slice() {
+ if p == v.name && n != v.name.Name().Defn {
return n
}
}
}
- if a := v.visit(n.Left); a != nil {
+ if a := v.visit(n.Left()); a != nil {
return a
}
- if a := v.visit(n.Right); a != nil {
+ if a := v.visit(n.Right()); a != nil {
return a
}
- if a := v.visitList(n.List); a != nil {
+ if a := v.visitList(n.List()); a != nil {
return a
}
- if a := v.visitList(n.Rlist); a != nil {
+ if a := v.visitList(n.Rlist()); a != nil {
return a
}
- if a := v.visitList(n.Ninit); a != nil {
+ if a := v.visitList(n.Init()); a != nil {
return a
}
- if a := v.visitList(n.Nbody); a != nil {
+ if a := v.visitList(n.Body()); a != nil {
return a
}
return nil
if inlvar == nil {
base.Fatalf("missing inlvar for %v", n)
}
- as.Ninit.Append(ir.Nod(ir.ODCL, inlvar, nil))
- inlvar.Name.Defn = as
+ as.PtrInit().Append(ir.Nod(ir.ODCL, inlvar, nil))
+ inlvar.Name().Defn = as
return inlvar
}
// The result of mkinlcall MUST be assigned back to n, e.g.
// n.Left = mkinlcall(n.Left, fn, isddd)
func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node {
- if fn.Func.Inl == nil {
+ if fn.Func().Inl == nil {
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", ir.FuncName(Curfn),
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn)))
}
return n
}
- if fn.Func.Inl.Cost > maxCost {
+ if fn.Func().Inl.Cost > maxCost {
// The inlined function body is too big. Typically we use this check to restrict
// inlining into very big functions. See issue 26546 and 17566.
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", ir.FuncName(Curfn),
- fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func.Inl.Cost, ir.PkgFuncName(fn), maxCost))
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
+ fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Func().Inl.Cost, ir.PkgFuncName(fn), maxCost))
}
return n
}
- if fn == Curfn || fn.Name.Defn == Curfn {
+ if fn == Curfn || fn.Name().Defn == Curfn {
// Can't recursively inline a function into itself.
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(Curfn)))
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(Curfn)))
}
return n
}
- if instrumenting && isRuntimePkg(fn.Sym.Pkg) {
+ if instrumenting && isRuntimePkg(fn.Sym().Pkg) {
// Runtime package must not be instrumented.
// Instrument skips runtime package. However, some runtime code can be
// inlined into other packages and instrumented there. To avoid this,
// we disable inlining of runtime functions when instrumenting.
// We have a function node, and it has an inlineable body.
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: inlining call to %v %#v { %#v }\n", ir.Line(n), fn.Sym, fn.Type, ir.AsNodes(fn.Func.Inl.Body))
+ fmt.Printf("%v: inlining call to %v %#v { %#v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.AsNodes(fn.Func().Inl.Body))
} else if base.Flag.LowerM != 0 {
fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
}
ssaDumpInlined = append(ssaDumpInlined, fn)
}
- ninit := n.Ninit
+ ninit := n.Init()
// For normal function calls, the function callee expression
// may contain side effects (e.g., added by addinit during
// inlconv2expr or inlconv2list). Make sure to preserve these,
// if necessary (#42703).
- if n.Op == ir.OCALLFUNC {
- callee := n.Left
- for callee.Op == ir.OCONVNOP {
- ninit.AppendNodes(&callee.Ninit)
- callee = callee.Left
+ if n.Op() == ir.OCALLFUNC {
+ callee := n.Left()
+ for callee.Op() == ir.OCONVNOP {
+ ninit.AppendNodes(callee.PtrInit())
+ callee = callee.Left()
}
- if callee.Op != ir.ONAME && callee.Op != ir.OCLOSURE && callee.Op != ir.OMETHEXPR {
+ if callee.Op() != ir.ONAME && callee.Op() != ir.OCLOSURE && callee.Op() != ir.OMETHEXPR {
base.Fatalf("unexpected callee expression: %v", callee)
}
}
var inlfvars []*ir.Node
// Handle captured variables when inlining closures.
- if fn.Name.Defn != nil {
- if c := fn.Name.Defn.Func.OClosure; c != nil {
- for _, v := range c.Func.ClosureVars.Slice() {
- if v.Op == ir.OXXX {
+ if fn.Name().Defn != nil {
+ if c := fn.Name().Defn.Func().OClosure; c != nil {
+ for _, v := range c.Func().ClosureVars.Slice() {
+ if v.Op() == ir.OXXX {
continue
}
- o := v.Name.Param.Outer
+ o := v.Name().Param.Outer
// make sure the outer param matches the inlining location
// NB: if we enabled inlining of functions containing OCLOSURE or refined
// the reassigned check via some sort of copy propagation this would most
// likely need to be changed to a loop to walk up to the correct Param
- if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.OClosure != Curfn) {
+ if o == nil || (o.Name().Curfn != Curfn && o.Name().Curfn.Func().OClosure != Curfn) {
base.Fatalf("%v: unresolvable capture %v %v\n", ir.Line(n), fn, v)
}
- if v.Name.Byval() {
+ if v.Name().Byval() {
iv := typecheck(inlvar(v), ctxExpr)
ninit.Append(ir.Nod(ir.ODCL, iv, nil))
ninit.Append(typecheck(ir.Nod(ir.OAS, iv, o), ctxStmt))
inlvars[v] = iv
} else {
- addr := NewName(lookup("&" + v.Sym.Name))
- addr.Type = types.NewPtr(v.Type)
+ addr := NewName(lookup("&" + v.Sym().Name))
+ addr.SetType(types.NewPtr(v.Type()))
ia := typecheck(inlvar(addr), ctxExpr)
ninit.Append(ir.Nod(ir.ODCL, ia, nil))
ninit.Append(typecheck(ir.Nod(ir.OAS, ia, ir.Nod(ir.OADDR, o, nil)), ctxStmt))
}
}
- for _, ln := range fn.Func.Inl.Dcl {
- if ln.Op != ir.ONAME {
+ for _, ln := range fn.Func().Inl.Dcl {
+ if ln.Op() != ir.ONAME {
continue
}
if ln.Class() == ir.PPARAMOUT { // return values handled below.
inlvars[ln] = inlf
if base.Flag.GenDwarfInl > 0 {
if ln.Class() == ir.PPARAM {
- inlf.Name.SetInlFormal(true)
+ inlf.Name().SetInlFormal(true)
} else {
- inlf.Name.SetInlLocal(true)
+ inlf.Name().SetInlLocal(true)
}
- inlf.Pos = ln.Pos
+ inlf.SetPos(ln.Pos())
inlfvars = append(inlfvars, inlf)
}
}
nreturns := 0
- ir.InspectList(ir.AsNodes(fn.Func.Inl.Body), func(n *ir.Node) bool {
- if n != nil && n.Op == ir.ORETURN {
+ ir.InspectList(ir.AsNodes(fn.Func().Inl.Body), func(n *ir.Node) bool {
+ if n != nil && n.Op() == ir.ORETURN {
nreturns++
}
return true
// temporaries for return values.
var retvars []*ir.Node
- for i, t := range fn.Type.Results().Fields().Slice() {
+ for i, t := range fn.Type().Results().Fields().Slice() {
var m *ir.Node
- if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym.Name, "~r") {
+ if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym().Name, "~r") {
m = inlvar(n)
m = typecheck(m, ctxExpr)
inlvars[n] = m
// Don't update the src.Pos on a return variable if it
// was manufactured by the inliner (e.g. "~R2"); such vars
// were not part of the original callee.
- if !strings.HasPrefix(m.Sym.Name, "~R") {
- m.Name.SetInlFormal(true)
- m.Pos = t.Pos
+ if !strings.HasPrefix(m.Sym().Name, "~R") {
+ m.Name().SetInlFormal(true)
+ m.SetPos(t.Pos)
inlfvars = append(inlfvars, m)
}
}
// Assign arguments to the parameters' temp names.
as := ir.Nod(ir.OAS2, nil, nil)
as.SetColas(true)
- if n.Op == ir.OCALLMETH {
- if n.Left.Left == nil {
+ if n.Op() == ir.OCALLMETH {
+ if n.Left().Left() == nil {
base.Fatalf("method call without receiver: %+v", n)
}
- as.Rlist.Append(n.Left.Left)
+ as.PtrRlist().Append(n.Left().Left())
}
- as.Rlist.Append(n.List.Slice()...)
+ as.PtrRlist().Append(n.List().Slice()...)
// For non-dotted calls to variadic functions, we assign the
// variadic parameter's temp name separately.
var vas *ir.Node
- if recv := fn.Type.Recv(); recv != nil {
- as.List.Append(inlParam(recv, as, inlvars))
+ if recv := fn.Type().Recv(); recv != nil {
+ as.PtrList().Append(inlParam(recv, as, inlvars))
}
- for _, param := range fn.Type.Params().Fields().Slice() {
+ for _, param := range fn.Type().Params().Fields().Slice() {
// For ordinary parameters or variadic parameters in
// dotted calls, just add the variable to the
// assignment list, and we're done.
if !param.IsDDD() || n.IsDDD() {
- as.List.Append(inlParam(param, as, inlvars))
+ as.PtrList().Append(inlParam(param, as, inlvars))
continue
}
// Otherwise, we need to collect the remaining values
// to pass as a slice.
- x := as.List.Len()
- for as.List.Len() < as.Rlist.Len() {
- as.List.Append(argvar(param.Type, as.List.Len()))
+ x := as.List().Len()
+ for as.List().Len() < as.Rlist().Len() {
+ as.PtrList().Append(argvar(param.Type, as.List().Len()))
}
- varargs := as.List.Slice()[x:]
+ varargs := as.List().Slice()[x:]
vas = ir.Nod(ir.OAS, nil, nil)
- vas.Left = inlParam(param, vas, inlvars)
+ vas.SetLeft(inlParam(param, vas, inlvars))
if len(varargs) == 0 {
- vas.Right = nodnil()
- vas.Right.Type = param.Type
+ vas.SetRight(nodnil())
+ vas.Right().SetType(param.Type)
} else {
- vas.Right = ir.Nod(ir.OCOMPLIT, nil, typenod(param.Type))
- vas.Right.List.Set(varargs)
+ vas.SetRight(ir.Nod(ir.OCOMPLIT, nil, typenod(param.Type)))
+ vas.Right().PtrList().Set(varargs)
}
}
- if as.Rlist.Len() != 0 {
+ if as.Rlist().Len() != 0 {
as = typecheck(as, ctxStmt)
ninit.Append(as)
}
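// Aside (editorial, not part of this CL): the variadic case above,
// hand-expanded at the source level. The trailing arguments are packed into a
// fresh slice temp (the OCOMPLIT built for vas) and bound to the ...parameter
// (hypothetical names):
package main

import "fmt"

func main() {
	a, b, c := 1, 2, 3
	// Inlined form of sum(a, b, c), where sum(xs ...int) int.
	xs := []int{a, b, c} // the "~arg" slice temp
	total := 0
	for _, x := range xs {
		total += x
	}
	fmt.Println(total) // prints 6
}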
inlgen++
parent := -1
- if b := base.Ctxt.PosTable.Pos(n.Pos).Base(); b != nil {
+ if b := base.Ctxt.PosTable.Pos(n.Pos()).Base(); b != nil {
parent = b.InliningIndex()
}
- newIndex := base.Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym())
+ newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), fn.Sym().Linksym())
// Add an inline mark just before the inlined body.
// This mark is inline in the code so that it's a reasonable spot
// to put a breakpoint. Not sure if that's really necessary or not
// (in which case it could go at the end of the function instead).
// Note issue 28603.
inlMark := ir.Nod(ir.OINLMARK, nil, nil)
- inlMark.Pos = n.Pos.WithIsStmt()
- inlMark.Xoffset = int64(newIndex)
+ inlMark.SetPos(n.Pos().WithIsStmt())
+ inlMark.SetOffset(int64(newIndex))
ninit.Append(inlMark)
if base.Flag.GenDwarfInl > 0 {
- if !fn.Sym.Linksym().WasInlined() {
- base.Ctxt.DwFixups.SetPrecursorFunc(fn.Sym.Linksym(), fn)
- fn.Sym.Linksym().Set(obj.AttrWasInlined, true)
+ if !fn.Sym().Linksym().WasInlined() {
+ base.Ctxt.DwFixups.SetPrecursorFunc(fn.Sym().Linksym(), fn)
+ fn.Sym().Linksym().Set(obj.AttrWasInlined, true)
}
}
newInlIndex: newIndex,
}
- body := subst.list(ir.AsNodes(fn.Func.Inl.Body))
+ body := subst.list(ir.AsNodes(fn.Func().Inl.Body))
lab := nodSym(ir.OLABEL, nil, retlabel)
body = append(body, lab)
if base.Flag.GenDwarfInl > 0 {
for _, v := range inlfvars {
- v.Pos = subst.updatedPos(v.Pos)
+ v.SetPos(subst.updatedPos(v.Pos()))
}
}
//dumplist("ninit post", ninit);
call := ir.Nod(ir.OINLCALL, nil, nil)
- call.Ninit.Set(ninit.Slice())
- call.Nbody.Set(body)
- call.Rlist.Set(retvars)
- call.Type = n.Type
+ call.PtrInit().Set(ninit.Slice())
+ call.PtrBody().Set(body)
+ call.PtrRlist().Set(retvars)
+ call.SetType(n.Type())
call.SetTypecheck(1)
// transitive inlining
// might be nice to do this before exporting the body,
// but can't emit the body with inlining expanded.
// instead we emit the things that the body needs
// and each use must redo the inlining.
// luckily these are small.
- inlnodelist(call.Nbody, maxCost, inlMap)
- for _, n := range call.Nbody.Slice() {
- if n.Op == ir.OINLCALL {
+ inlnodelist(call.Body(), maxCost, inlMap)
+ for _, n := range call.Body().Slice() {
+ if n.Op() == ir.OINLCALL {
inlconv2stmt(n)
}
}
fmt.Printf("inlvar %+v\n", var_)
}
- n := NewName(var_.Sym)
- n.Type = var_.Type
+ n := NewName(var_.Sym())
+ n.SetType(var_.Type())
n.SetClass(ir.PAUTO)
- n.Name.SetUsed(true)
- n.Name.Curfn = Curfn // the calling function, not the called one
- n.Name.SetAddrtaken(var_.Name.Addrtaken())
+ n.Name().SetUsed(true)
+ n.Name().Curfn = Curfn // the calling function, not the called one
+ n.Name().SetAddrtaken(var_.Name().Addrtaken())
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
return n
}
// Synthesize a variable to store the inlined function's results in.
func retvar(t *types.Field, i int) *ir.Node {
n := NewName(lookupN("~R", i))
- n.Type = t.Type
+ n.SetType(t.Type)
n.SetClass(ir.PAUTO)
- n.Name.SetUsed(true)
- n.Name.Curfn = Curfn // the calling function, not the called one
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ n.Name().SetUsed(true)
+ n.Name().Curfn = Curfn // the calling function, not the called one
+ Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
return n
}
// when they come from a multiple return call.
func argvar(t *types.Type, i int) *ir.Node {
n := NewName(lookupN("~arg", i))
- n.Type = t.Elem()
+ n.SetType(t.Elem())
n.SetClass(ir.PAUTO)
- n.Name.SetUsed(true)
- n.Name.Curfn = Curfn // the calling function, not the called one
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ n.Name().SetUsed(true)
+ n.Name().Curfn = Curfn // the calling function, not the called one
+ Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
return n
}
return nil
}
- switch n.Op {
+ switch n.Op() {
case ir.ONAME:
if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
if base.Flag.LowerM > 2 {
// If n is a named constant or type, we can continue
// using it in the inline copy. Otherwise, make a copy
// so we can update the line number.
- if n.Sym != nil {
+ if n.Sym() != nil {
return n
}
// dump("Return before substitution", n);
case ir.ORETURN:
m := nodSym(ir.OGOTO, nil, subst.retlabel)
- m.Ninit.Set(subst.list(n.Ninit))
+ m.PtrInit().Set(subst.list(n.Init()))
- if len(subst.retvars) != 0 && n.List.Len() != 0 {
+ if len(subst.retvars) != 0 && n.List().Len() != 0 {
as := ir.Nod(ir.OAS2, nil, nil)
// Make a shallow copy of retvars.
// Otherwise OINLCALL.Rlist will be the same list,
// and later walk and typecheck may clobber it.
for _, n := range subst.retvars {
- as.List.Append(n)
+ as.PtrList().Append(n)
}
- as.Rlist.Set(subst.list(n.List))
+ as.PtrRlist().Set(subst.list(n.List()))
if subst.delayretvars {
- for _, n := range as.List.Slice() {
- as.Ninit.Append(ir.Nod(ir.ODCL, n, nil))
- n.Name.Defn = as
+ for _, n := range as.List().Slice() {
+ as.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
+ n.Name().Defn = as
}
}
as = typecheck(as, ctxStmt)
- m.Ninit.Append(as)
+ m.PtrInit().Append(as)
}
- typecheckslice(m.Ninit.Slice(), ctxStmt)
+ typecheckslice(m.Init().Slice(), ctxStmt)
m = typecheck(m, ctxStmt)
// dump("Return after substitution", m);
case ir.OGOTO, ir.OLABEL:
m := ir.Copy(n)
- m.Pos = subst.updatedPos(m.Pos)
- m.Ninit.Set(nil)
- p := fmt.Sprintf("%s·%d", n.Sym.Name, inlgen)
- m.Sym = lookup(p)
+ m.SetPos(subst.updatedPos(m.Pos()))
+ m.PtrInit().Set(nil)
+ p := fmt.Sprintf("%s·%d", n.Sym().Name, inlgen)
+ m.SetSym(lookup(p))
return m
}
m := ir.Copy(n)
- m.Pos = subst.updatedPos(m.Pos)
- m.Ninit.Set(nil)
+ m.SetPos(subst.updatedPos(m.Pos()))
+ m.PtrInit().Set(nil)
- if n.Op == ir.OCLOSURE {
+ if n.Op() == ir.OCLOSURE {
base.Fatalf("cannot inline function containing closure: %+v", n)
}
- m.Left = subst.node(n.Left)
- m.Right = subst.node(n.Right)
- m.List.Set(subst.list(n.List))
- m.Rlist.Set(subst.list(n.Rlist))
- m.Ninit.Set(append(m.Ninit.Slice(), subst.list(n.Ninit)...))
- m.Nbody.Set(subst.list(n.Nbody))
+ m.SetLeft(subst.node(n.Left()))
+ m.SetRight(subst.node(n.Right()))
+ m.PtrList().Set(subst.list(n.List()))
+ m.PtrRlist().Set(subst.list(n.Rlist()))
+ m.PtrInit().Set(append(m.Init().Slice(), subst.list(n.Init())...))
+ m.PtrBody().Set(subst.list(n.Body()))
return m
}
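// Aside (editorial, not part of this CL): why OGOTO/OLABEL names gain the
// "·%d" suffix above. Each inlined copy of a body must rename its labels so
// that two copies landing in the same caller cannot collide. The renaming,
// isolated (inlgen is the per-inline counter):
package main

import "fmt"

var inlgen int

func renameLabel(name string) string {
	return fmt.Sprintf("%s·%d", name, inlgen)
}

func main() {
	inlgen++
	fmt.Println(renameLabel("loop")) // loop·1
	inlgen++
	fmt.Println(renameLabel("loop")) // loop·2
}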
// devirtualize replaces interface method calls within fn with direct
// concrete-type method calls where applicable.
func devirtualize(fn *ir.Node) {
Curfn = fn
- ir.InspectList(fn.Nbody, func(n *ir.Node) bool {
- if n.Op == ir.OCALLINTER {
+ ir.InspectList(fn.Body(), func(n *ir.Node) bool {
+ if n.Op() == ir.OCALLINTER {
devirtualizeCall(n)
}
return true
}
func devirtualizeCall(call *ir.Node) {
- recv := staticValue(call.Left.Left)
- if recv.Op != ir.OCONVIFACE {
+ recv := staticValue(call.Left().Left())
+ if recv.Op() != ir.OCONVIFACE {
return
}
- typ := recv.Left.Type
+ typ := recv.Left().Type()
if typ.IsInterface() {
return
}
- x := ir.NodAt(call.Left.Pos, ir.ODOTTYPE, call.Left.Left, nil)
- x.Type = typ
- x = nodlSym(call.Left.Pos, ir.OXDOT, x, call.Left.Sym)
+ x := ir.NodAt(call.Left().Pos(), ir.ODOTTYPE, call.Left().Left(), nil)
+ x.SetType(typ)
+ x = nodlSym(call.Left().Pos(), ir.OXDOT, x, call.Left().Sym())
x = typecheck(x, ctxExpr|ctxCallee)
- switch x.Op {
+ switch x.Op() {
case ir.ODOTMETH:
if base.Flag.LowerM != 0 {
- base.WarnfAt(call.Pos, "devirtualizing %v to %v", call.Left, typ)
+ base.WarnfAt(call.Pos(), "devirtualizing %v to %v", call.Left(), typ)
}
- call.Op = ir.OCALLMETH
- call.Left = x
+ call.SetOp(ir.OCALLMETH)
+ call.SetLeft(x)
case ir.ODOTINTER:
// Promoted method from embedded interface-typed field (#42279).
if base.Flag.LowerM != 0 {
- base.WarnfAt(call.Pos, "partially devirtualizing %v to %v", call.Left, typ)
+ base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", call.Left(), typ)
}
- call.Op = ir.OCALLINTER
- call.Left = x
+ call.SetOp(ir.OCALLINTER)
+ call.SetLeft(x)
default:
// TODO(mdempsky): Turn back into Fatalf after more testing.
if base.Flag.LowerM != 0 {
- base.WarnfAt(call.Pos, "failed to devirtualize %v (%v)", x, x.Op)
+ base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
}
return
}
// Receiver parameter size may have changed; need to update
// call.Type to get correct stack offsets for result
// parameters.
- checkwidth(x.Type)
- switch ft := x.Type; ft.NumResults() {
+ checkwidth(x.Type())
+ switch ft := x.Type(); ft.NumResults() {
case 0:
case 1:
- call.Type = ft.Results().Field(0).Type
+ call.SetType(ft.Results().Field(0).Type)
default:
- call.Type = ft.Results()
+ call.SetType(ft.Results())
}
}
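// Aside (editorial, not part of this CL): the rewrite devirtualizeCall
// performs, expressed as source. When the concrete type behind an interface
// value is statically known, the interface call can become a type assertion
// plus a direct method call:
package main

import "fmt"

type stringer interface{ String() string }

type lit string

func (l lit) String() string { return string(l) }

func main() {
	var s stringer = lit("hi")    // OCONVIFACE with a known concrete type
	fmt.Println(s.String())       // OCALLINTER: dynamic dispatch
	fmt.Println(s.(lit).String()) // devirtualized: ODOTTYPE + OCALLMETH
}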
timings.Start("fe", "typecheck", "top1")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
- if op := n.Op; op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.Left.Name.Param.Alias()) {
+ if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.Left().Name().Param.Alias()) {
xtop[i] = typecheck(n, ctxStmt)
}
}
timings.Start("fe", "typecheck", "top2")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
- if op := n.Op; op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.Left.Name.Param.Alias() {
+ if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.Left().Name().Param.Alias() {
xtop[i] = typecheck(n, ctxStmt)
}
}
var fcount int64
for i := 0; i < len(xtop); i++ {
n := xtop[i]
- if n.Op == ir.ODCLFUNC {
+ if n.Op() == ir.ODCLFUNC {
Curfn = n
decldepth = 1
errorsBefore := base.Errors()
- typecheckslice(Curfn.Nbody.Slice(), ctxStmt)
+ typecheckslice(Curfn.Body().Slice(), ctxStmt)
checkreturn(Curfn)
if base.Errors() > errorsBefore {
- Curfn.Nbody.Set(nil) // type errors; do not compile
+ Curfn.PtrBody().Set(nil) // type errors; do not compile
}
// Now that we've checked whether n terminates,
// we can eliminate some obviously dead code.
// because variables captured by value do not escape.
timings.Start("fe", "capturevars")
for _, n := range xtop {
- if n.Op == ir.ODCLFUNC && n.Func.OClosure != nil {
+ if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil {
Curfn = n
capturevars(n)
}
// Typecheck imported function bodies if Debug.l > 1,
// otherwise lazily when used or re-exported.
for _, n := range importlist {
- if n.Func.Inl != nil {
+ if n.Func().Inl != nil {
typecheckinl(n)
}
}
caninl(n)
} else {
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Func.Nname)
+ fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Func().Nname)
}
}
inlcalls(n)
}
for _, n := range xtop {
- if n.Op == ir.ODCLFUNC {
+ if n.Op() == ir.ODCLFUNC {
devirtualize(n)
}
}
// before walk reaches a call of a closure.
timings.Start("fe", "xclosures")
for _, n := range xtop {
- if n.Op == ir.ODCLFUNC && n.Func.OClosure != nil {
+ if n.Op() == ir.ODCLFUNC && n.Func().OClosure != nil {
Curfn = n
transformclosure(n)
}
fcount = 0
for i := 0; i < len(xtop); i++ {
n := xtop[i]
- if n.Op == ir.ODCLFUNC {
+ if n.Op() == ir.ODCLFUNC {
funccompile(n)
fcount++
}
// Phase 9: Check external declarations.
timings.Start("be", "externaldcls")
for i, n := range externdcl {
- if n.Op == ir.ONAME {
+ if n.Op() == ir.ONAME {
externdcl[i] = typecheck(externdcl[i], ctxExpr)
}
}
func numNonClosures(list []*ir.Node) int {
count := 0
for _, n := range list {
- if n.Func.OClosure == nil {
+ if n.Func().OClosure == nil {
count++
}
}
if n == nil {
continue
}
- if n.Op == ir.OPACK {
+ if n.Op() == ir.OPACK {
// throw away top-level package name left over
// from previous file.
// leave s->block set to cause redeclaration
// errors if a conflicting top-level name is
// introduced by a different file.
- if !n.Name.Used() && base.SyntaxErrors() == 0 {
- unused = append(unused, importedPkg{n.Pos, n.Name.Pkg.Path, s.Name})
+ if !n.Name().Used() && base.SyntaxErrors() == 0 {
+ unused = append(unused, importedPkg{n.Pos(), n.Name().Pkg.Path, s.Name})
}
s.Def = nil
continue
if IsAlias(s) {
// throw away top-level name left over
// from previous import . "x"
- if n.Name != nil && n.Name.Pack != nil && !n.Name.Pack.Name.Used() && base.SyntaxErrors() == 0 {
- unused = append(unused, importedPkg{n.Name.Pack.Pos, n.Name.Pack.Name.Pkg.Path, ""})
- n.Name.Pack.Name.SetUsed(true)
+ if n.Name() != nil && n.Name().Pack != nil && !n.Name().Pack.Name().Used() && base.SyntaxErrors() == 0 {
+ unused = append(unused, importedPkg{n.Name().Pack.Pos(), n.Name().Pack.Name().Pkg.Path, ""})
+ n.Name().Pack.Name().SetUsed(true)
}
s.Def = nil
continue
}
func IsAlias(sym *types.Sym) bool {
- return sym.Def != nil && ir.AsNode(sym.Def).Sym != sym
+ return sym.Def != nil && ir.AsNode(sym.Def).Sym() != sym
}
// recordFlags records the specified command-line flags to be placed
if body == nil {
body = []*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)}
}
- fn.Nbody.Set(body)
+ fn.PtrBody().Set(body)
base.Pos = p.makeXPos(block.Rbrace)
- fn.Func.Endlineno = base.Pos
+ fn.Func().Endlineno = base.Pos
}
funcbody()
types.Markdcl()
if trackScopes {
- Curfn.Func.Parents = append(Curfn.Func.Parents, p.scope)
- p.scopeVars = append(p.scopeVars, len(Curfn.Func.Dcl))
- p.scope = ir.ScopeID(len(Curfn.Func.Parents))
+ Curfn.Func().Parents = append(Curfn.Func().Parents, p.scope)
+ p.scopeVars = append(p.scopeVars, len(Curfn.Func().Dcl))
+ p.scope = ir.ScopeID(len(Curfn.Func().Parents))
p.markScope(pos)
}
if trackScopes {
scopeVars := p.scopeVars[len(p.scopeVars)-1]
p.scopeVars = p.scopeVars[:len(p.scopeVars)-1]
- if scopeVars == len(Curfn.Func.Dcl) {
+ if scopeVars == len(Curfn.Func().Dcl) {
// no variables were declared in this scope, so we can retract it.
- if int(p.scope) != len(Curfn.Func.Parents) {
+ if int(p.scope) != len(Curfn.Func().Parents) {
base.Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted")
}
- p.scope = Curfn.Func.Parents[p.scope-1]
- Curfn.Func.Parents = Curfn.Func.Parents[:len(Curfn.Func.Parents)-1]
+ p.scope = Curfn.Func().Parents[p.scope-1]
+ Curfn.Func().Parents = Curfn.Func().Parents[:len(Curfn.Func().Parents)-1]
- nmarks := len(Curfn.Func.Marks)
- Curfn.Func.Marks[nmarks-1].Scope = p.scope
+ nmarks := len(Curfn.Func().Marks)
+ Curfn.Func().Marks[nmarks-1].Scope = p.scope
prevScope := ir.ScopeID(0)
if nmarks >= 2 {
- prevScope = Curfn.Func.Marks[nmarks-2].Scope
+ prevScope = Curfn.Func().Marks[nmarks-2].Scope
}
- if Curfn.Func.Marks[nmarks-1].Scope == prevScope {
- Curfn.Func.Marks = Curfn.Func.Marks[:nmarks-1]
+ if Curfn.Func().Marks[nmarks-1].Scope == prevScope {
+ Curfn.Func().Marks = Curfn.Func().Marks[:nmarks-1]
}
return
}
- p.scope = Curfn.Func.Parents[p.scope-1]
+ p.scope = Curfn.Func().Parents[p.scope-1]
p.markScope(pos)
}
func (p *noder) markScope(pos syntax.Pos) {
xpos := p.makeXPos(pos)
- if i := len(Curfn.Func.Marks); i > 0 && Curfn.Func.Marks[i-1].Pos == xpos {
- Curfn.Func.Marks[i-1].Scope = p.scope
+ if i := len(Curfn.Func().Marks); i > 0 && Curfn.Func().Marks[i-1].Pos == xpos {
+ Curfn.Func().Marks[i-1].Scope = p.scope
} else {
- Curfn.Func.Marks = append(Curfn.Func.Marks, ir.Mark{Pos: xpos, Scope: p.scope})
+ Curfn.Func().Marks = append(Curfn.Func().Marks, ir.Mark{Pos: xpos, Scope: p.scope})
}
}
}
pack := p.nod(imp, ir.OPACK, nil, nil)
- pack.Sym = my
- pack.Name.Pkg = ipkg
+ pack.SetSym(my)
+ pack.Name().Pkg = ipkg
switch my.Name {
case ".":
importdot(ipkg, pack)
return
case "init":
- base.ErrorfAt(pack.Pos, "cannot import package as init - init must be a func")
+ base.ErrorfAt(pack.Pos(), "cannot import package as init - init must be a func")
return
case "_":
return
}
if my.Def != nil {
- redeclare(pack.Pos, my, "as imported package name")
+ redeclare(pack.Pos(), my, "as imported package name")
}
my.Def = ir.AsTypesNode(pack)
- my.Lastlineno = pack.Pos
+ my.Lastlineno = pack.Pos()
my.Block = 1 // at top level
}
}
v := values[i]
if decl.Values == nil {
- v = treecopy(v, n.Pos)
+ v = treecopy(v, n.Pos())
}
- n.Op = ir.OLITERAL
+ n.SetOp(ir.OLITERAL)
declare(n, dclcontext)
- n.Name.Param.Ntype = typ
- n.Name.Defn = v
+ n.Name().Param.Ntype = typ
+ n.Name().Defn = v
n.SetIota(cs.iota)
nn = append(nn, p.nod(decl, ir.ODCLCONST, n, nil))
func (p *noder) typeDecl(decl *syntax.TypeDecl) *ir.Node {
n := p.declName(decl.Name)
- n.Op = ir.OTYPE
+ n.SetOp(ir.OTYPE)
declare(n, dclcontext)
// decl.Type may be nil but in that case we got a syntax error during parsing
typ := p.typeExprOrNil(decl.Type)
- param := n.Name.Param
+ param := n.Name().Param
param.Ntype = typ
param.SetAlias(decl.Alias)
if pragma, ok := decl.Pragma.(*Pragma); ok {
nod := p.nod(decl, ir.ODCLTYPE, n, nil)
if param.Alias() && !langSupported(1, 9, ir.LocalPkg) {
- base.ErrorfAt(nod.Pos, "type aliases only supported as of -lang=go1.9")
+ base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9")
}
return nod
}
func (p *noder) declName(name *syntax.Name) *ir.Node {
n := dclname(p.name(name))
- n.Pos = p.pos(name)
+ n.SetPos(p.pos(name))
return n
}
if fun.Recv == nil {
if name.Name == "init" {
name = renameinit()
- if t.List.Len() > 0 || t.Rlist.Len() > 0 {
- base.ErrorfAt(f.Pos, "func init must have no arguments and no return values")
+ if t.List().Len() > 0 || t.Rlist().Len() > 0 {
+ base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values")
}
}
if ir.LocalPkg.Name == "main" && name.Name == "main" {
- if t.List.Len() > 0 || t.Rlist.Len() > 0 {
- base.ErrorfAt(f.Pos, "func main must have no arguments and no return values")
+ if t.List().Len() > 0 || t.Rlist().Len() > 0 {
+ base.ErrorfAt(f.Pos(), "func main must have no arguments and no return values")
}
}
} else {
- f.Func.Shortname = name
- name = ir.BlankNode.Sym // filled in by typecheckfunc
+ f.Func().Shortname = name
+ name = ir.BlankNode.Sym() // filled in by typecheckfunc
}
- f.Func.Nname = newfuncnamel(p.pos(fun.Name), name, f.Func)
- f.Func.Nname.Name.Defn = f
- f.Func.Nname.Name.Param.Ntype = t
+ f.Func().Nname = newfuncnamel(p.pos(fun.Name), name, f.Func())
+ f.Func().Nname.Name().Defn = f
+ f.Func().Nname.Name().Param.Ntype = t
if pragma, ok := fun.Pragma.(*Pragma); ok {
- f.Func.Pragma = pragma.Flag & FuncPragmas
+ f.Func().Pragma = pragma.Flag & FuncPragmas
if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 {
- base.ErrorfAt(f.Pos, "go:nosplit and go:systemstack cannot be combined")
+ base.ErrorfAt(f.Pos(), "go:nosplit and go:systemstack cannot be combined")
}
pragma.Flag &^= FuncPragmas
p.checkUnused(pragma)
}
if fun.Recv == nil {
- declare(f.Func.Nname, ir.PFUNC)
+ declare(f.Func().Nname, ir.PFUNC)
}
p.funcBody(f, fun.Body)
if fun.Body != nil {
- if f.Func.Pragma&ir.Noescape != 0 {
- base.ErrorfAt(f.Pos, "can only use //go:noescape with external func implementations")
+ if f.Func().Pragma&ir.Noescape != 0 {
+ base.ErrorfAt(f.Pos(), "can only use //go:noescape with external func implementations")
}
} else {
if base.Flag.Complete || strings.HasPrefix(ir.FuncName(f), "init.") {
// Linknamed functions are allowed to have no body. Hopefully
// the linkname target has a body. See issue 23311.
isLinknamed := false
for _, n := range p.linknames {
if ir.FuncName(f) == n.local {
isLinknamed = true
break
}
}
if !isLinknamed {
- base.ErrorfAt(f.Pos, "missing function body")
+ base.ErrorfAt(f.Pos(), "missing function body")
}
}
}
func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.Node {
n := p.nod(typ, ir.OTFUNC, nil, nil)
if recv != nil {
- n.Left = p.param(recv, false, false)
+ n.SetLeft(p.param(recv, false, false))
}
- n.List.Set(p.params(typ.ParamList, true))
- n.Rlist.Set(p.params(typ.ResultList, false))
+ n.PtrList().Set(p.params(typ.ParamList, true))
+ n.PtrRlist().Set(p.params(typ.ResultList, false))
return n
}
n := p.nodSym(param, ir.ODCLFIELD, typ, name)
// rewrite ...T parameter
- if typ.Op == ir.ODDD {
+ if typ.Op() == ir.ODDD {
if !dddOk {
// We mark these as syntax errors to get automatic elimination
// of multiple such errors per line (see ErrorfAt in subr.go).
p.errorAt(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value)
}
}
- typ.Op = ir.OTARRAY
- typ.Right = typ.Left
- typ.Left = nil
+ typ.SetOp(ir.OTARRAY)
+ typ.SetRight(typ.Left())
+ typ.SetLeft(nil)
n.SetIsDDD(true)
- if n.Left != nil {
- n.Left.SetIsDDD(true)
+ if n.Left() != nil {
+ n.Left().SetIsDDD(true)
}
}
case *syntax.BasicLit:
n := ir.NewLiteral(p.basicLit(expr))
if expr.Kind == syntax.RuneLit {
- n.Type = types.UntypedRune
+ n.SetType(types.UntypedRune)
}
n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error
return n
case *syntax.CompositeLit:
n := p.nod(expr, ir.OCOMPLIT, nil, nil)
if expr.Type != nil {
- n.Right = p.expr(expr.Type)
+ n.SetRight(p.expr(expr.Type))
}
l := p.exprs(expr.ElemList)
for i, e := range l {
l[i] = p.wrapname(expr.ElemList[i], e)
}
- n.List.Set(l)
+ n.PtrList().Set(l)
base.Pos = p.makeXPos(expr.Rbrace)
return n
case *syntax.KeyValueExpr:
case *syntax.SelectorExpr:
// parser.new_dotname
obj := p.expr(expr.X)
- if obj.Op == ir.OPACK {
- obj.Name.SetUsed(true)
- return importName(obj.Name.Pkg.Lookup(expr.Sel.Value))
+ if obj.Op() == ir.OPACK {
+ obj.Name().SetUsed(true)
+ return importName(obj.Name().Pkg.Lookup(expr.Sel.Value))
}
n := nodSym(ir.OXDOT, obj, p.name(expr.Sel))
- n.Pos = p.pos(expr) // lineno may have been changed by p.expr(expr.X)
+ n.SetPos(p.pos(expr)) // lineno may have been changed by p.expr(expr.X)
return n
case *syntax.IndexExpr:
return p.nod(expr, ir.OINDEX, p.expr(expr.X), p.expr(expr.Index))
return p.nod(expr, p.binOp(expr.Op), x, p.expr(expr.Y))
case *syntax.CallExpr:
n := p.nod(expr, ir.OCALL, p.expr(expr.Fun), nil)
- n.List.Set(p.exprs(expr.ArgList))
+ n.PtrList().Set(p.exprs(expr.ArgList))
n.SetIsDDD(expr.HasDots)
return n
case *syntax.TypeSwitchGuard:
n := p.nod(expr, ir.OTYPESW, nil, p.expr(expr.X))
if expr.Lhs != nil {
- n.Left = p.declName(expr.Lhs)
- if ir.IsBlank(n.Left) {
- base.Errorf("invalid variable name %v in type switch", n.Left)
+ n.SetLeft(p.declName(expr.Lhs))
+ if ir.IsBlank(n.Left()) {
+ base.Errorf("invalid variable name %v in type switch", n.Left())
}
}
return n
chunks := make([]string, 0, 1)
n := p.expr(x)
- if ir.IsConst(n, constant.String) && n.Sym == nil {
+ if ir.IsConst(n, constant.String) && n.Sym() == nil {
nstr = n
chunks = append(chunks, nstr.StringVal())
}
add := adds[i]
r := p.expr(add.Y)
- if ir.IsConst(r, constant.String) && r.Sym == nil {
+ if ir.IsConst(r, constant.String) && r.Sym() == nil {
if nstr != nil {
// Collapse r into nstr instead of adding to n.
chunks = append(chunks, r.StringVal())
p.setlineno(expr)
n := p.nod(expr, ir.OTSTRUCT, nil, nil)
- n.List.Set(l)
+ n.PtrList().Set(l)
return n
}
} else {
mname := p.name(method.Name)
sig := p.typeExpr(method.Type)
- sig.Left = fakeRecv()
+ sig.SetLeft(fakeRecv())
n = p.nodSym(method, ir.ODCLFIELD, sig, mname)
ifacedcl(n)
}
}
n := p.nod(expr, ir.OTINTER, nil, nil)
- n.List.Set(l)
+ n.PtrList().Set(l)
return n
}
switch expr := expr.(type) {
case *syntax.Name:
name := p.name(expr)
- if n := oldname(name); n.Name != nil && n.Name.Pack != nil {
- n.Name.Pack.Name.SetUsed(true)
+ if n := oldname(name); n.Name() != nil && n.Name().Pack != nil {
+ n.Name().Pack.Name().SetUsed(true)
}
return name
case *syntax.SelectorExpr:
return name
}
var pkg *types.Pkg
- if def.Op != ir.OPACK {
+ if def.Op() != ir.OPACK {
base.Errorf("%v is not a package", name)
pkg = ir.LocalPkg
} else {
- def.Name.SetUsed(true)
- pkg = def.Name.Pkg
+ def.Name().SetUsed(true)
+ pkg = def.Name().Pkg
}
return pkg.Lookup(expr.Sel.Value)
}
n.SetEmbedded(true)
if isStar {
- n.Left = p.nod(op, ir.ODEREF, n.Left, nil)
+ n.SetLeft(p.nod(op, ir.ODEREF, n.Left(), nil))
}
return n
}
for i, stmt := range stmts {
s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
if s == nil {
- } else if s.Op == ir.OBLOCK && s.Ninit.Len() == 0 {
- nodes = append(nodes, s.List.Slice()...)
+ } else if s.Op() == ir.OBLOCK && s.Init().Len() == 0 {
+ nodes = append(nodes, s.List().Slice()...)
} else {
nodes = append(nodes, s)
}
if len(lhs) == 1 && len(rhs) == 1 {
// common case
- n.Left = lhs[0]
- n.Right = rhs[0]
+ n.SetLeft(lhs[0])
+ n.SetRight(rhs[0])
} else {
- n.Op = ir.OAS2
- n.List.Set(lhs)
- n.Rlist.Set(rhs)
+ n.SetOp(ir.OAS2)
+ n.PtrList().Set(lhs)
+ n.PtrRlist().Set(rhs)
}
return n
}
n := p.nod(stmt, op, nil, nil)
if stmt.Label != nil {
- n.Sym = p.name(stmt.Label)
+ n.SetSym(p.name(stmt.Label))
}
return n
case *syntax.CallStmt:
results = p.exprList(stmt.Results)
}
n := p.nod(stmt, ir.ORETURN, nil, nil)
- n.List.Set(results)
- if n.List.Len() == 0 && Curfn != nil {
- for _, ln := range Curfn.Func.Dcl {
+ n.PtrList().Set(results)
+ if n.List().Len() == 0 && Curfn != nil {
+ for _, ln := range Curfn.Func().Dcl {
if ln.Class() == ir.PPARAM {
continue
}
if ln.Class() != ir.PPARAMOUT {
break
}
- if ir.AsNode(ln.Sym.Def) != ln {
- base.Errorf("%s is shadowed during return", ln.Sym.Name)
+ if ir.AsNode(ln.Sym().Def) != ln {
+ base.Errorf("%s is shadowed during return", ln.Sym().Name)
}
}
}
newOrErr = true
n := NewName(sym)
declare(n, dclcontext)
- n.Name.Defn = defn
- defn.Ninit.Append(ir.Nod(ir.ODCL, n, nil))
+ n.Name().Defn = defn
+ defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
res[i] = n
}
if !newOrErr {
- base.ErrorfAt(defn.Pos, "no new variables on left side of :=")
+ base.ErrorfAt(defn.Pos(), "no new variables on left side of :=")
}
return res
}
p.openScope(stmt.Pos())
n := p.nod(stmt, ir.OIF, nil, nil)
if stmt.Init != nil {
- n.Ninit.Set1(p.stmt(stmt.Init))
+ n.PtrInit().Set1(p.stmt(stmt.Init))
}
if stmt.Cond != nil {
- n.Left = p.expr(stmt.Cond)
+ n.SetLeft(p.expr(stmt.Cond))
}
- n.Nbody.Set(p.blockStmt(stmt.Then))
+ n.PtrBody().Set(p.blockStmt(stmt.Then))
if stmt.Else != nil {
e := p.stmt(stmt.Else)
- if e.Op == ir.OBLOCK && e.Ninit.Len() == 0 {
- n.Rlist.Set(e.List.Slice())
+ if e.Op() == ir.OBLOCK && e.Init().Len() == 0 {
+ n.PtrRlist().Set(e.List().Slice())
} else {
- n.Rlist.Set1(e)
+ n.PtrRlist().Set1(e)
}
}
p.closeAnotherScope()
n = p.nod(r, ir.ORANGE, nil, p.expr(r.X))
if r.Lhs != nil {
- n.List.Set(p.assignList(r.Lhs, n, r.Def))
+ n.PtrList().Set(p.assignList(r.Lhs, n, r.Def))
}
} else {
n = p.nod(stmt, ir.OFOR, nil, nil)
if stmt.Init != nil {
- n.Ninit.Set1(p.stmt(stmt.Init))
+ n.PtrInit().Set1(p.stmt(stmt.Init))
}
if stmt.Cond != nil {
- n.Left = p.expr(stmt.Cond)
+ n.SetLeft(p.expr(stmt.Cond))
}
if stmt.Post != nil {
- n.Right = p.stmt(stmt.Post)
+ n.SetRight(p.stmt(stmt.Post))
}
}
- n.Nbody.Set(p.blockStmt(stmt.Body))
+ n.PtrBody().Set(p.blockStmt(stmt.Body))
p.closeAnotherScope()
return n
}
p.openScope(stmt.Pos())
n := p.nod(stmt, ir.OSWITCH, nil, nil)
if stmt.Init != nil {
- n.Ninit.Set1(p.stmt(stmt.Init))
+ n.PtrInit().Set1(p.stmt(stmt.Init))
}
if stmt.Tag != nil {
- n.Left = p.expr(stmt.Tag)
+ n.SetLeft(p.expr(stmt.Tag))
}
- tswitch := n.Left
- if tswitch != nil && tswitch.Op != ir.OTYPESW {
+ tswitch := n.Left()
+ if tswitch != nil && tswitch.Op() != ir.OTYPESW {
tswitch = nil
}
- n.List.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
+ n.PtrList().Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
p.closeScope(stmt.Rbrace)
return n
n := p.nod(clause, ir.OCASE, nil, nil)
if clause.Cases != nil {
- n.List.Set(p.exprList(clause.Cases))
+ n.PtrList().Set(p.exprList(clause.Cases))
}
- if tswitch != nil && tswitch.Left != nil {
- nn := NewName(tswitch.Left.Sym)
+ if tswitch != nil && tswitch.Left() != nil {
+ nn := NewName(tswitch.Left().Sym())
declare(nn, dclcontext)
- n.Rlist.Set1(nn)
+ n.PtrRlist().Set1(nn)
// keep track of the instances for reporting unused
- nn.Name.Defn = tswitch
+ nn.Name().Defn = tswitch
}
// Trim trailing empty statements. We omit them from
// the Node AST anyway, and it's easier to identify
// out-of-place fallthrough statements without them.
body = body[:len(body)-1]
}
- n.Nbody.Set(p.stmtsFall(body, true))
- if l := n.Nbody.Len(); l > 0 && n.Nbody.Index(l-1).Op == ir.OFALL {
+ n.PtrBody().Set(p.stmtsFall(body, true))
+ if l := n.Body().Len(); l > 0 && n.Body().Index(l-1).Op() == ir.OFALL {
if tswitch != nil {
base.Errorf("cannot fallthrough in type switch")
}
func (p *noder) selectStmt(stmt *syntax.SelectStmt) *ir.Node {
n := p.nod(stmt, ir.OSELECT, nil, nil)
- n.List.Set(p.commClauses(stmt.Body, stmt.Rbrace))
+ n.PtrList().Set(p.commClauses(stmt.Body, stmt.Rbrace))
return n
}
n := p.nod(clause, ir.OCASE, nil, nil)
if clause.Comm != nil {
- n.List.Set1(p.stmt(clause.Comm))
+ n.PtrList().Set1(p.stmt(clause.Comm))
}
- n.Nbody.Set(p.stmts(clause.Body))
+ n.PtrBody().Set(p.stmts(clause.Body))
nodes = append(nodes, n)
}
if len(clauses) > 0 {
ls = p.stmtFall(label.Stmt, fallOK)
}
- lhs.Name.Defn = ls
+ lhs.Name().Defn = ls
l := []*ir.Node{lhs}
if ls != nil {
- if ls.Op == ir.OBLOCK && ls.Ninit.Len() == 0 {
- l = append(l, ls.List.Slice()...)
+ if ls.Op() == ir.OBLOCK && ls.Init().Len() == 0 {
+ l = append(l, ls.List().Slice()...)
} else {
l = append(l, ls)
}
func (p *noder) wrapname(n syntax.Node, x *ir.Node) *ir.Node {
// These nodes do not carry line numbers.
// Introduce a wrapper node to give them the correct line.
- switch x.Op {
+ switch x.Op() {
case ir.OTYPE, ir.OLITERAL:
- if x.Sym == nil {
+ if x.Sym() == nil {
break
}
fallthrough
func (p *noder) nodSym(orig syntax.Node, op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node {
n := nodSym(op, left, sym)
- n.Pos = p.pos(orig)
+ n.SetPos(p.pos(orig))
return n
}
func mkname(sym *types.Sym) *ir.Node {
n := oldname(sym)
- if n.Name != nil && n.Name.Pack != nil {
- n.Name.Pack.Name.SetUsed(true)
+ if n.Name() != nil && n.Name().Pack != nil {
+ n.Name().Pack.Name().SetUsed(true)
}
return n
}
for {
for i := xtops; i < len(xtop); i++ {
n := xtop[i]
- if n.Op == ir.ODCLFUNC {
+ if n.Op() == ir.ODCLFUNC {
funccompile(n)
}
}
return
}
for _, exportn := range exportlist {
- s := exportn.Sym
+ s := exportn.Sym()
n := ir.AsNode(s.Def)
if n == nil {
continue
}
- if n.Op != ir.ONAME {
+ if n.Op() != ir.ONAME {
continue
}
if !types.IsExported(s.Name) {
if s.Pkg.Name != "main" {
continue
}
- if n.Type.Etype == types.TFUNC && n.Class() == ir.PFUNC {
+ if n.Type().Etype == types.TFUNC && n.Class() == ir.PFUNC {
// function
- ptabs = append(ptabs, ptabEntry{s: s, t: ir.AsNode(s.Def).Type})
+ ptabs = append(ptabs, ptabEntry{s: s, t: ir.AsNode(s.Def).Type()})
} else {
// variable
- ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(ir.AsNode(s.Def).Type)})
+ ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(ir.AsNode(s.Def).Type())})
}
}
}
func dumpGlobal(n *ir.Node) {
- if n.Type == nil {
+ if n.Type() == nil {
base.Fatalf("external %v nil type\n", n)
}
if n.Class() == ir.PFUNC {
return
}
- if n.Sym.Pkg != ir.LocalPkg {
+ if n.Sym().Pkg != ir.LocalPkg {
return
}
- dowidth(n.Type)
+ dowidth(n.Type())
ggloblnod(n)
}
func dumpGlobalConst(n *ir.Node) {
// only export typed constants
- t := n.Type
+ t := n.Type()
if t == nil {
return
}
- if n.Sym.Pkg != ir.LocalPkg {
+ if n.Sym().Pkg != ir.LocalPkg {
return
}
// only export integer constants for now
return
}
}
- base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym.Name, typesymname(t), ir.Int64Val(t, v))
+ base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, typesymname(t), ir.Int64Val(t, v))
}
func dumpglobls() {
// add globals
for _, n := range externdcl {
- switch n.Op {
+ switch n.Op() {
case ir.ONAME:
dumpGlobal(n)
case ir.OLITERAL:
if readonly {
sym = stringsym(pos, string(data))
} else {
- sym = slicedata(pos, string(data)).Sym.Linksym()
+ sym = slicedata(pos, string(data)).Sym().Linksym()
}
if len(hash) > 0 {
sum := sha256.Sum256(data)
} else {
// Emit a zero-length data symbol
// and then fix up length and content to use file.
- symdata = slicedata(pos, "").Sym.Linksym()
+ symdata = slicedata(pos, "").Sym().Linksym()
symdata.Size = size
symdata.Type = objabi.SNOPTRDATA
info := symdata.NewFileInfo()
}
func slicebytes(nam *ir.Node, s string) {
- if nam.Op != ir.ONAME {
+ if nam.Op() != ir.ONAME {
base.Fatalf("slicebytes %v", nam)
}
- slicesym(nam, slicedata(nam.Pos, s), int64(len(s)))
+ slicesym(nam, slicedata(nam.Pos(), s), int64(len(s)))
}
func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
// slicesym writes a static slice symbol {&arr, lencap, lencap} to n.
// arr must be an ONAME. slicesym does not modify n.
func slicesym(n, arr *ir.Node, lencap int64) {
- s := n.Sym.Linksym()
- off := n.Xoffset
- if arr.Op != ir.ONAME {
+ s := n.Sym().Linksym()
+ off := n.Offset()
+ if arr.Op() != ir.ONAME {
base.Fatalf("slicesym non-name arr %v", arr)
}
- s.WriteAddr(base.Ctxt, off, Widthptr, arr.Sym.Linksym(), arr.Xoffset)
+ s.WriteAddr(base.Ctxt, off, Widthptr, arr.Sym().Linksym(), arr.Offset())
s.WriteInt(base.Ctxt, off+sliceLenOffset, Widthptr, lencap)
s.WriteInt(base.Ctxt, off+sliceCapOffset, Widthptr, lencap)
}
// addrsym writes the static address of a to n. a must be an ONAME.
// Neither n nor a is modified.
func addrsym(n, a *ir.Node) {
- if n.Op != ir.ONAME {
- base.Fatalf("addrsym n op %v", n.Op)
+ if n.Op() != ir.ONAME {
+ base.Fatalf("addrsym n op %v", n.Op())
}
- if n.Sym == nil {
+ if n.Sym() == nil {
base.Fatalf("addrsym nil n sym")
}
- if a.Op != ir.ONAME {
- base.Fatalf("addrsym a op %v", a.Op)
+ if a.Op() != ir.ONAME {
+ base.Fatalf("addrsym a op %v", a.Op())
}
- s := n.Sym.Linksym()
- s.WriteAddr(base.Ctxt, n.Xoffset, Widthptr, a.Sym.Linksym(), a.Xoffset)
+ s := n.Sym().Linksym()
+ s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, a.Sym().Linksym(), a.Offset())
}
// pfuncsym writes the static address of f to n. f must be a global function.
// Neither n nor f is modified.
func pfuncsym(n, f *ir.Node) {
- if n.Op != ir.ONAME {
- base.Fatalf("pfuncsym n op %v", n.Op)
+ if n.Op() != ir.ONAME {
+ base.Fatalf("pfuncsym n op %v", n.Op())
}
- if n.Sym == nil {
+ if n.Sym() == nil {
base.Fatalf("pfuncsym nil n sym")
}
if f.Class() != ir.PFUNC {
base.Fatalf("pfuncsym class not PFUNC %d", f.Class())
}
- s := n.Sym.Linksym()
- s.WriteAddr(base.Ctxt, n.Xoffset, Widthptr, funcsym(f.Sym).Linksym(), f.Xoffset)
+ s := n.Sym().Linksym()
+ s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, funcsym(f.Sym()).Linksym(), f.Offset())
}
// litsym writes the static literal c to n.
// Neither n nor c is modified.
func litsym(n, c *ir.Node, wid int) {
- if n.Op != ir.ONAME {
- base.Fatalf("litsym n op %v", n.Op)
+ if n.Op() != ir.ONAME {
+ base.Fatalf("litsym n op %v", n.Op())
}
- if n.Sym == nil {
+ if n.Sym() == nil {
base.Fatalf("litsym nil n sym")
}
- if !types.Identical(n.Type, c.Type) {
- base.Fatalf("litsym: type mismatch: %v has type %v, but %v has type %v", n, n.Type, c, c.Type)
+ if !types.Identical(n.Type(), c.Type()) {
+ base.Fatalf("litsym: type mismatch: %v has type %v, but %v has type %v", n, n.Type(), c, c.Type())
}
- if c.Op == ir.ONIL {
+ if c.Op() == ir.ONIL {
return
}
- if c.Op != ir.OLITERAL {
- base.Fatalf("litsym c op %v", c.Op)
+ if c.Op() != ir.OLITERAL {
+ base.Fatalf("litsym c op %v", c.Op())
}
- s := n.Sym.Linksym()
+ s := n.Sym().Linksym()
switch u := c.Val(); u.Kind() {
case constant.Bool:
i := int64(obj.Bool2int(constant.BoolVal(u)))
- s.WriteInt(base.Ctxt, n.Xoffset, wid, i)
+ s.WriteInt(base.Ctxt, n.Offset(), wid, i)
case constant.Int:
- s.WriteInt(base.Ctxt, n.Xoffset, wid, ir.Int64Val(n.Type, u))
+ s.WriteInt(base.Ctxt, n.Offset(), wid, ir.Int64Val(n.Type(), u))
case constant.Float:
f, _ := constant.Float64Val(u)
- switch n.Type.Etype {
+ switch n.Type().Etype {
case types.TFLOAT32:
- s.WriteFloat32(base.Ctxt, n.Xoffset, float32(f))
+ s.WriteFloat32(base.Ctxt, n.Offset(), float32(f))
case types.TFLOAT64:
- s.WriteFloat64(base.Ctxt, n.Xoffset, f)
+ s.WriteFloat64(base.Ctxt, n.Offset(), f)
}
case constant.Complex:
re, _ := constant.Float64Val(constant.Real(u))
im, _ := constant.Float64Val(constant.Imag(u))
- switch n.Type.Etype {
+ switch n.Type().Etype {
case types.TCOMPLEX64:
- s.WriteFloat32(base.Ctxt, n.Xoffset, float32(re))
- s.WriteFloat32(base.Ctxt, n.Xoffset+4, float32(im))
+ s.WriteFloat32(base.Ctxt, n.Offset(), float32(re))
+ s.WriteFloat32(base.Ctxt, n.Offset()+4, float32(im))
case types.TCOMPLEX128:
- s.WriteFloat64(base.Ctxt, n.Xoffset, re)
- s.WriteFloat64(base.Ctxt, n.Xoffset+8, im)
+ s.WriteFloat64(base.Ctxt, n.Offset(), re)
+ s.WriteFloat64(base.Ctxt, n.Offset()+8, im)
}
case constant.String:
i := constant.StringVal(u)
- symdata := stringsym(n.Pos, i)
- s.WriteAddr(base.Ctxt, n.Xoffset, Widthptr, symdata, 0)
- s.WriteInt(base.Ctxt, n.Xoffset+int64(Widthptr), Widthptr, int64(len(i)))
+ symdata := stringsym(n.Pos(), i)
+ s.WriteAddr(base.Ctxt, n.Offset(), Widthptr, symdata, 0)
+ s.WriteInt(base.Ctxt, n.Offset()+int64(Widthptr), Widthptr, int64(len(i)))
default:
base.Fatalf("litsym unhandled OLITERAL %v", c)
// order rewrites fn.Nbody to apply the ordering constraints
// described in the comment at the top of the file.
func order(fn *ir.Node) {
if base.Flag.W > 1 {
- s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym)
- ir.DumpList(s, fn.Nbody)
+ s := fmt.Sprintf("\nbefore order %v", fn.Func().Nname.Sym())
+ ir.DumpList(s, fn.Body())
}
- orderBlock(&fn.Nbody, map[string][]*ir.Node{})
+ orderBlock(fn.PtrBody(), map[string][]*ir.Node{})
}
// newTemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, newTemp emits code to zero the temporary.
key := t.LongString()
a := o.free[key]
for i, n := range a {
- if types.Identical(t, n.Type) {
+ if types.Identical(t, n.Type()) {
v = a[i]
a[i] = a[len(a)-1]
a = a[:len(a)-1]
return nil
}
- switch n.Op {
+ switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
case ir.OLEN, ir.OCAP:
- l := o.cheapExpr(n.Left)
- if l == n.Left {
+ l := o.cheapExpr(n.Left())
+ if l == n.Left() {
return n
}
a := ir.SepCopy(n)
- a.Left = l
+ a.SetLeft(l)
return typecheck(a, ctxExpr)
}
- return o.copyExpr(n, n.Type, false)
+ return o.copyExpr(n, n.Type(), false)
}
// safeExpr returns a safe version of n.
// The definition of safe is that n can appear multiple times
// without violating the semantics of the original program,
// and that assigning to the safe version has the same effect
// as assigning to the original n.
//
// The intended use is to apply to x when rewriting x += y into x = x + y.
func (o *Order) safeExpr(n *ir.Node) *ir.Node {
- switch n.Op {
+ switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
case ir.ODOT, ir.OLEN, ir.OCAP:
- l := o.safeExpr(n.Left)
- if l == n.Left {
+ l := o.safeExpr(n.Left())
+ if l == n.Left() {
return n
}
a := ir.SepCopy(n)
- a.Left = l
+ a.SetLeft(l)
return typecheck(a, ctxExpr)
case ir.ODOTPTR, ir.ODEREF:
- l := o.cheapExpr(n.Left)
- if l == n.Left {
+ l := o.cheapExpr(n.Left())
+ if l == n.Left() {
return n
}
a := ir.SepCopy(n)
- a.Left = l
+ a.SetLeft(l)
return typecheck(a, ctxExpr)
case ir.OINDEX, ir.OINDEXMAP:
var l *ir.Node
- if n.Left.Type.IsArray() {
- l = o.safeExpr(n.Left)
+ if n.Left().Type().IsArray() {
+ l = o.safeExpr(n.Left())
} else {
- l = o.cheapExpr(n.Left)
+ l = o.cheapExpr(n.Left())
}
- r := o.cheapExpr(n.Right)
- if l == n.Left && r == n.Right {
+ r := o.cheapExpr(n.Right())
+ if l == n.Left() && r == n.Right() {
return n
}
a := ir.SepCopy(n)
- a.Left = l
- a.Right = r
+ a.SetLeft(l)
+ a.SetRight(r)
return typecheck(a, ctxExpr)
default:
- base.Fatalf("order.safeExpr %v", n.Op)
+ base.Fatalf("order.safeExpr %v", n.Op())
return nil // not reached
}
}
// because we emit explicit VARKILL instructions marking the end of those
// temporaries' lifetimes.
func isaddrokay(n *ir.Node) bool {
- return islvalue(n) && (n.Op != ir.ONAME || n.Class() == ir.PEXTERN || ir.IsAutoTmp(n))
+ return islvalue(n) && (n.Op() != ir.ONAME || n.Class() == ir.PEXTERN || ir.IsAutoTmp(n))
}
// addrTemp ensures that n is okay to pass by address to runtime routines.
// The result of addrTemp MUST be assigned back to n, e.g.
- // n.Left = o.addrTemp(n.Left)
+ // n.SetLeft(o.addrTemp(n.Left()))
func (o *Order) addrTemp(n *ir.Node) *ir.Node {
- if n.Op == ir.OLITERAL || n.Op == ir.ONIL {
+ if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
// TODO: expand this to all static composite literal nodes?
n = defaultlit(n, nil)
- dowidth(n.Type)
- vstat := readonlystaticname(n.Type)
+ dowidth(n.Type())
+ vstat := readonlystaticname(n.Type())
var s InitSchedule
s.staticassign(vstat, n)
if s.out != nil {
if isaddrokay(n) {
return n
}
- return o.copyExpr(n, n.Type, false)
+ return o.copyExpr(n, n.Type(), false)
}
// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
// It would be nice to handle these generally, but because
// []byte keys are not allowed in maps, the use of string(k)
// comes up in important cases in practice. See issue 3512.
func mapKeyReplaceStrConv(n *ir.Node) bool {
var replaced bool
- switch n.Op {
+ switch n.Op() {
case ir.OBYTES2STR:
- n.Op = ir.OBYTES2STRTMP
+ n.SetOp(ir.OBYTES2STRTMP)
replaced = true
case ir.OSTRUCTLIT:
- for _, elem := range n.List.Slice() {
- if mapKeyReplaceStrConv(elem.Left) {
+ for _, elem := range n.List().Slice() {
+ if mapKeyReplaceStrConv(elem.Left()) {
replaced = true
}
}
case ir.OARRAYLIT:
- for _, elem := range n.List.Slice() {
- if elem.Op == ir.OKEY {
- elem = elem.Right
+ for _, elem := range n.List().Slice() {
+ if elem.Op() == ir.OKEY {
+ elem = elem.Right()
}
if mapKeyReplaceStrConv(elem) {
replaced = true
// popTemp pops temporaries off the stack until reaching the mark,
// which must have been returned by markTemp.
func (o *Order) popTemp(mark ordermarker) {
for _, n := range o.temp[mark:] {
- key := n.Type.LongString()
+ key := n.Type().LongString()
o.free[key] = append(o.free[key], n)
}
o.temp = o.temp[:mark]
asn := s[0]
copyn := s[1]
- if asn == nil || asn.Op != ir.OAS {
+ if asn == nil || asn.Op() != ir.OAS {
return
}
- if asn.Left.Op != ir.ONAME {
+ if asn.Left().Op() != ir.ONAME {
return
}
- if ir.IsBlank(asn.Left) {
+ if ir.IsBlank(asn.Left()) {
return
}
- maken := asn.Right
- if maken == nil || maken.Op != ir.OMAKESLICE {
+ maken := asn.Right()
+ if maken == nil || maken.Op() != ir.OMAKESLICE {
return
}
- if maken.Esc == EscNone {
+ if maken.Esc() == EscNone {
return
}
- if maken.Left == nil || maken.Right != nil {
+ if maken.Left() == nil || maken.Right() != nil {
return
}
- if copyn.Op != ir.OCOPY {
+ if copyn.Op() != ir.OCOPY {
return
}
- if copyn.Left.Op != ir.ONAME {
+ if copyn.Left().Op() != ir.ONAME {
return
}
- if asn.Left.Sym != copyn.Left.Sym {
+ if asn.Left().Sym() != copyn.Left().Sym() {
return
}
- if copyn.Right.Op != ir.ONAME {
+ if copyn.Right().Op() != ir.ONAME {
return
}
- if copyn.Left.Sym == copyn.Right.Sym {
+ if copyn.Left().Sym() == copyn.Right().Sym() {
return
}
- maken.Op = ir.OMAKESLICECOPY
- maken.Right = copyn.Right
+ maken.SetOp(ir.OMAKESLICECOPY)
+ maken.SetRight(copyn.Right())
// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
- maken.SetBounded(maken.Left.Op == ir.OLEN && samesafeexpr(maken.Left.Left, copyn.Right))
+ maken.SetBounded(maken.Left().Op() == ir.OLEN && samesafeexpr(maken.Left().Left(), copyn.Right()))
maken = typecheck(maken, ctxExpr)
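// The fused pattern, in source terms (a sketch; T and s are hypothetical):
//
//	m := make([]T, len(s))
//	copy(m, s)
//
// becomes a single OMAKESLICECOPY node, which walk can lower to one
// runtime.makeslicecopy call instead of a makeslice followed by a
// separate copy. SetBounded records that the copy cannot write past the
// allocation when the requested length is literally len(s).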
// Create a new uint8 counter to be allocated in section
// __libfuzzer_extra_counters.
counter := staticname(types.Types[types.TUINT8])
- counter.Name.SetLibfuzzerExtraCounter(true)
+ counter.Name().SetLibfuzzerExtraCounter(true)
// counter += 1
incr := ir.Nod(ir.OASOP, counter, nodintconst(1))
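// In effect each switch clause gets a hidden counter++ on a uint8 cell
// in __libfuzzer_extra_counters, which libFuzzer reads as coverage
// feedback (a sketch of the instrumentation's intent, not extra codegen).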
if ir.MayBeShared(n) {
// For concurrency safety, don't mutate potentially shared nodes.
// First, ensure that no work is required here.
- if n.Ninit.Len() > 0 {
+ if n.Init().Len() > 0 {
base.Fatalf("order.init shared node with ninit")
}
return
}
- o.stmtList(n.Ninit)
- n.Ninit.Set(nil)
+ o.stmtList(n.Init())
+ n.PtrInit().Set(nil)
}
// call orders the call expression n.
// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
func (o *Order) call(n *ir.Node) {
- if n.Ninit.Len() > 0 {
+ if n.Init().Len() > 0 {
// Caller should have already called o.init(n).
- base.Fatalf("%v with unexpected ninit", n.Op)
+ base.Fatalf("%v with unexpected ninit", n.Op())
}
// Builtin functions.
- if n.Op != ir.OCALLFUNC && n.Op != ir.OCALLMETH && n.Op != ir.OCALLINTER {
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
- o.exprList(n.List)
+ if n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER {
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
+ o.exprList(n.List())
return
}
fixVariadicCall(n)
- n.Left = o.expr(n.Left, nil)
- o.exprList(n.List)
+ n.SetLeft(o.expr(n.Left(), nil))
+ o.exprList(n.List())
- if n.Op == ir.OCALLINTER {
+ if n.Op() == ir.OCALLINTER {
return
}
keepAlive := func(arg *ir.Node) {
// arrange for the pointer to be kept alive until the call returns,
// by copying it into a temp and marking that temp
// still alive when we pop the temp stack.
- if arg.Op == ir.OCONVNOP && arg.Left.Type.IsUnsafePtr() {
- x := o.copyExpr(arg.Left, arg.Left.Type, false)
- arg.Left = x
- x.Name.SetAddrtaken(true) // ensure SSA keeps the x variable
- n.Nbody.Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt))
+ if arg.Op() == ir.OCONVNOP && arg.Left().Type().IsUnsafePtr() {
+ x := o.copyExpr(arg.Left(), arg.Left().Type(), false)
+ arg.SetLeft(x)
+ x.Name().SetAddrtaken(true) // ensure SSA keeps the x variable
+ n.PtrBody().Append(typecheck(ir.Nod(ir.OVARLIVE, x, nil), ctxStmt))
}
}
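// The pattern being protected, in source terms (a sketch; p is a
// hypothetical pointer argument):
//
//	syscall.Syscall(fn, uintptr(unsafe.Pointer(p)), 0, 0)
//
// Only a uintptr crosses the call boundary, so without help the GC could
// reclaim *p mid-call. Copying the unsafe.Pointer operand into an
// addressable temp x and appending an OVARLIVE marker keeps x (and hence
// *p) alive until the call returns.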
// Check for "unsafe-uintptr" tag provided by escape analysis.
- for i, param := range n.Left.Type.Params().FieldSlice() {
+ for i, param := range n.Left().Type().Params().FieldSlice() {
if param.Note == unsafeUintptrTag || param.Note == uintptrEscapesTag {
- if arg := n.List.Index(i); arg.Op == ir.OSLICELIT {
- for _, elt := range arg.List.Slice() {
+ if arg := n.List().Index(i); arg.Op() == ir.OSLICELIT {
+ for _, elt := range arg.List().Slice() {
keepAlive(elt)
}
} else {
// And this only applies to the multiple-assignment form.
// We could do a more precise analysis if needed, like in walk.go.
func (o *Order) mapAssign(n *ir.Node) {
- switch n.Op {
+ switch n.Op() {
default:
- base.Fatalf("order.mapAssign %v", n.Op)
+ base.Fatalf("order.mapAssign %v", n.Op())
case ir.OAS, ir.OASOP:
- if n.Left.Op == ir.OINDEXMAP {
+ if n.Left().Op() == ir.OINDEXMAP {
// Make sure we evaluate the RHS before starting the map insert.
// We need to make sure the RHS won't panic. See issue 22881.
- if n.Right.Op == ir.OAPPEND {
- s := n.Right.List.Slice()[1:]
+ if n.Right().Op() == ir.OAPPEND {
+ s := n.Right().List().Slice()[1:]
for i, n := range s {
s[i] = o.cheapExpr(n)
}
} else {
- n.Right = o.cheapExpr(n.Right)
+ n.SetRight(o.cheapExpr(n.Right()))
}
}
o.out = append(o.out, n)
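// Example of the hazard (issue 22881), in source terms:
//
//	m[k] = append(m[k], a...)
//
// The append arguments after the first are made cheap above so that
// evaluating them cannot panic after the map insert has already begun
// and left m[k] half-updated.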
case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC:
var post []*ir.Node
- for i, m := range n.List.Slice() {
+ for i, m := range n.List().Slice() {
switch {
- case m.Op == ir.OINDEXMAP:
- if !ir.IsAutoTmp(m.Left) {
- m.Left = o.copyExpr(m.Left, m.Left.Type, false)
+ case m.Op() == ir.OINDEXMAP:
+ if !ir.IsAutoTmp(m.Left()) {
+ m.SetLeft(o.copyExpr(m.Left(), m.Left().Type(), false))
}
- if !ir.IsAutoTmp(m.Right) {
- m.Right = o.copyExpr(m.Right, m.Right.Type, false)
+ if !ir.IsAutoTmp(m.Right()) {
+ m.SetRight(o.copyExpr(m.Right(), m.Right().Type(), false))
}
fallthrough
- case instrumenting && n.Op == ir.OAS2FUNC && !ir.IsBlank(m):
- t := o.newTemp(m.Type, false)
- n.List.SetIndex(i, t)
+ case instrumenting && n.Op() == ir.OAS2FUNC && !ir.IsBlank(m):
+ t := o.newTemp(m.Type(), false)
+ n.List().SetIndex(i, t)
a := ir.Nod(ir.OAS, m, t)
a = typecheck(a, ctxStmt)
post = append(post, a)
lno := setlineno(n)
o.init(n)
- switch n.Op {
+ switch n.Op() {
default:
- base.Fatalf("order.stmt %v", n.Op)
+ base.Fatalf("order.stmt %v", n.Op())
case ir.OVARKILL, ir.OVARLIVE, ir.OINLMARK:
o.out = append(o.out, n)
case ir.OAS:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, n.Left)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), n.Left()))
o.mapAssign(n)
o.cleanTemp(t)
case ir.OASOP:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
- if instrumenting || n.Left.Op == ir.OINDEXMAP && (n.SubOp() == ir.ODIV || n.SubOp() == ir.OMOD) {
+ if instrumenting || n.Left().Op() == ir.OINDEXMAP && (n.SubOp() == ir.ODIV || n.SubOp() == ir.OMOD) {
// Rewrite m[k] op= r into m[k] = m[k] op r so
// that we can ensure that if op panics
// because r is zero, the panic happens before
// the map assignment.
- n.Left = o.safeExpr(n.Left)
+ n.SetLeft(o.safeExpr(n.Left()))
- l := treecopy(n.Left, src.NoXPos)
- if l.Op == ir.OINDEXMAP {
+ l := treecopy(n.Left(), src.NoXPos)
+ if l.Op() == ir.OINDEXMAP {
l.SetIndexMapLValue(false)
}
- l = o.copyExpr(l, n.Left.Type, false)
- n.Right = ir.Nod(n.SubOp(), l, n.Right)
- n.Right = typecheck(n.Right, ctxExpr)
- n.Right = o.expr(n.Right, nil)
+ l = o.copyExpr(l, n.Left().Type(), false)
+ n.SetRight(ir.Nod(n.SubOp(), l, n.Right()))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ n.SetRight(o.expr(n.Right(), nil))
- n.Op = ir.OAS
+ n.SetOp(ir.OAS)
n.ResetAux()
}
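// In source terms, the rewrite just performed (a sketch):
//
//	m[k] /= r
//
// becomes
//
//	m[k] = m[k] / r
//
// so that if the division panics because r is zero, the panic happens
// while evaluating the right-hand side, before the map assignment.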
case ir.OAS2:
t := o.markTemp()
- o.exprList(n.List)
- o.exprList(n.Rlist)
+ o.exprList(n.List())
+ o.exprList(n.Rlist())
o.mapAssign(n)
o.cleanTemp(t)
// Special: avoid copy of func call n.Right
case ir.OAS2FUNC:
t := o.markTemp()
- o.exprList(n.List)
- o.init(n.Right)
- o.call(n.Right)
+ o.exprList(n.List())
+ o.init(n.Right())
+ o.call(n.Right())
o.as2(n)
o.cleanTemp(t)
// and make sure OINDEXMAP is not copied out.
case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
t := o.markTemp()
- o.exprList(n.List)
+ o.exprList(n.List())
- switch r := n.Right; r.Op {
+ switch r := n.Right(); r.Op() {
case ir.ODOTTYPE2, ir.ORECV:
- r.Left = o.expr(r.Left, nil)
+ r.SetLeft(o.expr(r.Left(), nil))
case ir.OINDEXMAP:
- r.Left = o.expr(r.Left, nil)
- r.Right = o.expr(r.Right, nil)
+ r.SetLeft(o.expr(r.Left(), nil))
+ r.SetRight(o.expr(r.Right(), nil))
// See similar conversion for OINDEXMAP below.
- _ = mapKeyReplaceStrConv(r.Right)
- r.Right = o.mapKeyTemp(r.Left.Type, r.Right)
+ _ = mapKeyReplaceStrConv(r.Right())
+ r.SetRight(o.mapKeyTemp(r.Left().Type(), r.Right()))
default:
- base.Fatalf("order.stmt: %v", r.Op)
+ base.Fatalf("order.stmt: %v", r.Op())
}
o.okAs2(n)
// Special: does not save n onto out.
case ir.OBLOCK, ir.OEMPTY:
- o.stmtList(n.List)
+ o.stmtList(n.List())
// Special: n->left is not an expression; save as is.
case ir.OBREAK,
ir.ORECOVER,
ir.ORECV:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
- o.exprList(n.List)
- o.exprList(n.Rlist)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
+ o.exprList(n.List())
+ o.exprList(n.Rlist())
o.out = append(o.out, n)
o.cleanTemp(t)
// Special: order arguments to inner call but not call itself.
case ir.ODEFER, ir.OGO:
t := o.markTemp()
- o.init(n.Left)
- o.call(n.Left)
+ o.init(n.Left())
+ o.call(n.Left())
o.out = append(o.out, n)
o.cleanTemp(t)
case ir.ODELETE:
t := o.markTemp()
- n.List.SetFirst(o.expr(n.List.First(), nil))
- n.List.SetSecond(o.expr(n.List.Second(), nil))
- n.List.SetSecond(o.mapKeyTemp(n.List.First().Type, n.List.Second()))
+ n.List().SetFirst(o.expr(n.List().First(), nil))
+ n.List().SetSecond(o.expr(n.List().Second(), nil))
+ n.List().SetSecond(o.mapKeyTemp(n.List().First().Type(), n.List().Second()))
o.out = append(o.out, n)
o.cleanTemp(t)
// beginning of loop body and after for statement.
case ir.OFOR:
t := o.markTemp()
- n.Left = o.exprInPlace(n.Left)
- n.Nbody.Prepend(o.cleanTempNoPop(t)...)
- orderBlock(&n.Nbody, o.free)
- n.Right = orderStmtInPlace(n.Right, o.free)
+ n.SetLeft(o.exprInPlace(n.Left()))
+ n.PtrBody().Prepend(o.cleanTempNoPop(t)...)
+ orderBlock(n.PtrBody(), o.free)
+ n.SetRight(orderStmtInPlace(n.Right(), o.free))
o.out = append(o.out, n)
o.cleanTemp(t)
// beginning of both branches.
case ir.OIF:
t := o.markTemp()
- n.Left = o.exprInPlace(n.Left)
- n.Nbody.Prepend(o.cleanTempNoPop(t)...)
- n.Rlist.Prepend(o.cleanTempNoPop(t)...)
+ n.SetLeft(o.exprInPlace(n.Left()))
+ n.PtrBody().Prepend(o.cleanTempNoPop(t)...)
+ n.PtrRlist().Prepend(o.cleanTempNoPop(t)...)
o.popTemp(t)
- orderBlock(&n.Nbody, o.free)
- orderBlock(&n.Rlist, o.free)
+ orderBlock(n.PtrBody(), o.free)
+ orderBlock(n.PtrRlist(), o.free)
o.out = append(o.out, n)
// Special: argument will be converted to interface using convT2E
// so make sure it is an addressable temporary.
case ir.OPANIC:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- if !n.Left.Type.IsInterface() {
- n.Left = o.addrTemp(n.Left)
+ n.SetLeft(o.expr(n.Left(), nil))
+ if !n.Left().Type().IsInterface() {
+ n.SetLeft(o.addrTemp(n.Left()))
}
o.out = append(o.out, n)
o.cleanTemp(t)
// Mark []byte(str) range expression to reuse string backing storage.
// It is safe because the storage cannot be mutated.
- if n.Right.Op == ir.OSTR2BYTES {
- n.Right.Op = ir.OSTR2BYTESTMP
+ if n.Right().Op() == ir.OSTR2BYTES {
+ n.Right().SetOp(ir.OSTR2BYTESTMP)
}
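// Source-level pattern this enables (a sketch; s is a hypothetical string):
//
//	for i, b := range []byte(s) { ... }
//
// Marking the conversion OSTR2BYTESTMP lets the range read the string's
// backing storage directly instead of allocating a copy, which is safe
// because the loop body cannot mutate the string.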
t := o.markTemp()
- n.Right = o.expr(n.Right, nil)
+ n.SetRight(o.expr(n.Right(), nil))
orderBody := true
- switch n.Type.Etype {
+ switch n.Type().Etype {
default:
- base.Fatalf("order.stmt range %v", n.Type)
+ base.Fatalf("order.stmt range %v", n.Type())
case types.TARRAY, types.TSLICE:
- if n.List.Len() < 2 || ir.IsBlank(n.List.Second()) {
+ if n.List().Len() < 2 || ir.IsBlank(n.List().Second()) {
// for i := range x will only use x once, to compute len(x).
// No need to copy it.
break
case types.TCHAN, types.TSTRING:
// chan, string, slice, array ranges use value multiple times.
// make copy.
- r := n.Right
+ r := n.Right()
- if r.Type.IsString() && r.Type != types.Types[types.TSTRING] {
+ if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
r = ir.Nod(ir.OCONV, r, nil)
- r.Type = types.Types[types.TSTRING]
+ r.SetType(types.Types[types.TSTRING])
r = typecheck(r, ctxExpr)
}
- n.Right = o.copyExpr(r, r.Type, false)
+ n.SetRight(o.copyExpr(r, r.Type(), false))
case types.TMAP:
if isMapClear(n) {
// copy the map value in case it is a map literal.
// TODO(rsc): Make tmp = literal expressions reuse tmp.
// For maps tmp is just one word so it hardly matters.
- r := n.Right
- n.Right = o.copyExpr(r, r.Type, false)
+ r := n.Right()
+ n.SetRight(o.copyExpr(r, r.Type(), false))
// prealloc[n] is the temp for the iterator.
// hiter contains pointers and needs to be zeroed.
- prealloc[n] = o.newTemp(hiter(n.Type), true)
+ prealloc[n] = o.newTemp(hiter(n.Type()), true)
}
- o.exprListInPlace(n.List)
+ o.exprListInPlace(n.List())
if orderBody {
- orderBlock(&n.Nbody, o.free)
+ orderBlock(n.PtrBody(), o.free)
}
o.out = append(o.out, n)
o.cleanTemp(t)
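// A sketch of what the preallocated iterator temp becomes after walk
// (names follow the runtime's map implementation; illustrative only):
//
//	var it hiter            // the zeroed, pointer-containing temp above
//	mapiterinit(maptype, m, &it)
//	for ; it.key != nil; mapiternext(&it) { ... }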
case ir.ORETURN:
- o.exprList(n.List)
+ o.exprList(n.List())
o.out = append(o.out, n)
// Special: clean case temporaries in each block entry.
case ir.OSELECT:
t := o.markTemp()
- for _, n2 := range n.List.Slice() {
- if n2.Op != ir.OCASE {
- base.Fatalf("order select case %v", n2.Op)
+ for _, n2 := range n.List().Slice() {
+ if n2.Op() != ir.OCASE {
+ base.Fatalf("order select case %v", n2.Op())
}
- r := n2.Left
+ r := n2.Left()
setlineno(n2)
// Append any new body prologue to ninit.
// The next loop will insert ninit into nbody.
- if n2.Ninit.Len() != 0 {
+ if n2.Init().Len() != 0 {
base.Fatalf("order select ninit")
}
if r == nil {
continue
}
- switch r.Op {
+ switch r.Op() {
default:
ir.Dump("select case", r)
- base.Fatalf("unknown op in select %v", r.Op)
+ base.Fatalf("unknown op in select %v", r.Op())
// If this is case x := <-ch or case x, y := <-ch, the case has
// the ODCL nodes to declare x and y. We want to delay that
// declaration (and possible allocation) until inside the case body.
// Delete the ODCL nodes here and recreate them inside the body below.
case ir.OSELRECV, ir.OSELRECV2:
if r.Colas() {
i := 0
- if r.Ninit.Len() != 0 && r.Ninit.First().Op == ir.ODCL && r.Ninit.First().Left == r.Left {
+ if r.Init().Len() != 0 && r.Init().First().Op() == ir.ODCL && r.Init().First().Left() == r.Left() {
i++
}
- if i < r.Ninit.Len() && r.Ninit.Index(i).Op == ir.ODCL && r.List.Len() != 0 && r.Ninit.Index(i).Left == r.List.First() {
+ if i < r.Init().Len() && r.Init().Index(i).Op() == ir.ODCL && r.List().Len() != 0 && r.Init().Index(i).Left() == r.List().First() {
i++
}
- if i >= r.Ninit.Len() {
- r.Ninit.Set(nil)
+ if i >= r.Init().Len() {
+ r.PtrInit().Set(nil)
}
}
- if r.Ninit.Len() != 0 {
- ir.DumpList("ninit", r.Ninit)
+ if r.Init().Len() != 0 {
+ ir.DumpList("ninit", r.Init())
base.Fatalf("ninit on select recv")
}
- // r->left is x, r->ntest is ok, r->right is ORECV, r->right->left is c.
- // r->left == N means 'case <-c'.
+ // r.Left() is x, r.List().First() is ok, r.Right() is ORECV, r.Right().Left() is c.
+ // r.Left() == nil means 'case <-c'.
// c is always evaluated; x and ok are only evaluated when assigned.
- r.Right.Left = o.expr(r.Right.Left, nil)
+ r.Right().SetLeft(o.expr(r.Right().Left(), nil))
- if r.Right.Left.Op != ir.ONAME {
- r.Right.Left = o.copyExpr(r.Right.Left, r.Right.Left.Type, false)
+ if r.Right().Left().Op() != ir.ONAME {
+ r.Right().SetLeft(o.copyExpr(r.Right().Left(), r.Right().Left().Type(), false))
}
// Introduce temporary for receive and move actual copy into case body.
// temporary per distinct type, sharing the temp among all receives
// with that temp. Similarly one ok bool could be shared among all
// the x,ok receives. Not worth doing until there's a clear need.
- if r.Left != nil && ir.IsBlank(r.Left) {
- r.Left = nil
+ if r.Left() != nil && ir.IsBlank(r.Left()) {
+ r.SetLeft(nil)
}
- if r.Left != nil {
+ if r.Left() != nil {
// use channel element type for temporary to avoid conversions,
// such as in case interfacevalue = <-intchan.
// the conversion happens in the OAS instead.
- tmp1 := r.Left
+ tmp1 := r.Left()
if r.Colas() {
tmp2 := ir.Nod(ir.ODCL, tmp1, nil)
tmp2 = typecheck(tmp2, ctxStmt)
- n2.Ninit.Append(tmp2)
+ n2.PtrInit().Append(tmp2)
}
- r.Left = o.newTemp(r.Right.Left.Type.Elem(), r.Right.Left.Type.Elem().HasPointers())
- tmp2 := ir.Nod(ir.OAS, tmp1, r.Left)
+ r.SetLeft(o.newTemp(r.Right().Left().Type().Elem(), r.Right().Left().Type().Elem().HasPointers()))
+ tmp2 := ir.Nod(ir.OAS, tmp1, r.Left())
tmp2 = typecheck(tmp2, ctxStmt)
- n2.Ninit.Append(tmp2)
+ n2.PtrInit().Append(tmp2)
}
- if r.List.Len() != 0 && ir.IsBlank(r.List.First()) {
- r.List.Set(nil)
+ if r.List().Len() != 0 && ir.IsBlank(r.List().First()) {
+ r.PtrList().Set(nil)
}
- if r.List.Len() != 0 {
- tmp1 := r.List.First()
+ if r.List().Len() != 0 {
+ tmp1 := r.List().First()
if r.Colas() {
tmp2 := ir.Nod(ir.ODCL, tmp1, nil)
tmp2 = typecheck(tmp2, ctxStmt)
- n2.Ninit.Append(tmp2)
+ n2.PtrInit().Append(tmp2)
}
- r.List.Set1(o.newTemp(types.Types[types.TBOOL], false))
- tmp2 := okas(tmp1, r.List.First())
+ r.PtrList().Set1(o.newTemp(types.Types[types.TBOOL], false))
+ tmp2 := okas(tmp1, r.List().First())
tmp2 = typecheck(tmp2, ctxStmt)
- n2.Ninit.Append(tmp2)
+ n2.PtrInit().Append(tmp2)
}
- orderBlock(&n2.Ninit, o.free)
+ orderBlock(n2.PtrInit(), o.free)
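// In source terms, the lowering sketched by this case (x, ok, ch are
// hypothetical user names):
//
//	case x, ok := <-ch:
//		body
//
// becomes roughly
//
//	case tmp1, tmp2 = <-ch:   // temps typed after ch's element / bool
//		x, ok := tmp1, tmp2   // moved into the case's init list
//		body
//
// so the declaration and any conversion run only in the chosen case.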
case ir.OSEND:
- if r.Ninit.Len() != 0 {
- ir.DumpList("ninit", r.Ninit)
+ if r.Init().Len() != 0 {
+ ir.DumpList("ninit", r.Init())
base.Fatalf("ninit on select send")
}
// case c <- x
// r->left is c, r->right is x, both are always evaluated.
- r.Left = o.expr(r.Left, nil)
+ r.SetLeft(o.expr(r.Left(), nil))
- if !ir.IsAutoTmp(r.Left) {
- r.Left = o.copyExpr(r.Left, r.Left.Type, false)
+ if !ir.IsAutoTmp(r.Left()) {
+ r.SetLeft(o.copyExpr(r.Left(), r.Left().Type(), false))
}
- r.Right = o.expr(r.Right, nil)
- if !ir.IsAutoTmp(r.Right) {
- r.Right = o.copyExpr(r.Right, r.Right.Type, false)
+ r.SetRight(o.expr(r.Right(), nil))
+ if !ir.IsAutoTmp(r.Right()) {
+ r.SetRight(o.copyExpr(r.Right(), r.Right().Type(), false))
}
}
}
// Now that we have accumulated all the temporaries, clean them.
// Also insert any ninit queued during the previous loop.
// (The temporary cleaning must follow that ninit work.)
- for _, n3 := range n.List.Slice() {
- orderBlock(&n3.Nbody, o.free)
- n3.Nbody.Prepend(o.cleanTempNoPop(t)...)
+ for _, n3 := range n.List().Slice() {
+ orderBlock(n3.PtrBody(), o.free)
+ n3.PtrBody().Prepend(o.cleanTempNoPop(t)...)
// TODO(mdempsky): Is this actually necessary?
// walkselect appears to walk Ninit.
- n3.Nbody.Prepend(n3.Ninit.Slice()...)
- n3.Ninit.Set(nil)
+ n3.PtrBody().Prepend(n3.Init().Slice()...)
+ n3.PtrInit().Set(nil)
}
o.out = append(o.out, n)
// Special: value being sent is passed as a pointer; make it addressable.
case ir.OSEND:
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
if instrumenting {
// Force copying to the stack so that (chan T)(nil) <- x
// is still instrumented as a read of x.
- n.Right = o.copyExpr(n.Right, n.Right.Type, false)
+ n.SetRight(o.copyExpr(n.Right(), n.Right().Type(), false))
} else {
- n.Right = o.addrTemp(n.Right)
+ n.SetRight(o.addrTemp(n.Right()))
}
o.out = append(o.out, n)
o.cleanTemp(t)
case ir.OSWITCH:
if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
// Add empty "default:" case for instrumentation.
- n.List.Append(ir.Nod(ir.OCASE, nil, nil))
+ n.PtrList().Append(ir.Nod(ir.OCASE, nil, nil))
}
t := o.markTemp()
- n.Left = o.expr(n.Left, nil)
- for _, ncas := range n.List.Slice() {
- if ncas.Op != ir.OCASE {
- base.Fatalf("order switch case %v", ncas.Op)
+ n.SetLeft(o.expr(n.Left(), nil))
+ for _, ncas := range n.List().Slice() {
+ if ncas.Op() != ir.OCASE {
+ base.Fatalf("order switch case %v", ncas.Op())
}
- o.exprListInPlace(ncas.List)
- orderBlock(&ncas.Nbody, o.free)
+ o.exprListInPlace(ncas.List())
+ orderBlock(ncas.PtrBody(), o.free)
}
o.out = append(o.out, n)
}
func hasDefaultCase(n *ir.Node) bool {
- for _, ncas := range n.List.Slice() {
- if ncas.Op != ir.OCASE {
- base.Fatalf("expected case, found %v", ncas.Op)
+ for _, ncas := range n.List().Slice() {
+ if ncas.Op() != ir.OCASE {
+ base.Fatalf("expected case, found %v", ncas.Op())
}
- if ncas.List.Len() == 0 {
+ if ncas.List().Len() == 0 {
return true
}
}
lno := setlineno(n)
o.init(n)
- switch n.Op {
+ switch n.Op() {
default:
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
- o.exprList(n.List)
- o.exprList(n.Rlist)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
+ o.exprList(n.List())
+ o.exprList(n.Rlist())
// Addition of strings turns into a function call.
// Allocate a temporary to hold the strings.
// Fewer than 5 strings use direct runtime helpers.
case ir.OADDSTR:
- o.exprList(n.List)
+ o.exprList(n.List())
- if n.List.Len() > 5 {
- t := types.NewArray(types.Types[types.TSTRING], int64(n.List.Len()))
+ if n.List().Len() > 5 {
+ t := types.NewArray(types.Types[types.TSTRING], int64(n.List().Len()))
prealloc[n] = o.newTemp(t, false)
}
hasbyte := false
haslit := false
- for _, n1 := range n.List.Slice() {
- hasbyte = hasbyte || n1.Op == ir.OBYTES2STR
- haslit = haslit || n1.Op == ir.OLITERAL && len(n1.StringVal()) != 0
+ for _, n1 := range n.List().Slice() {
+ hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
+ haslit = haslit || n1.Op() == ir.OLITERAL && len(n1.StringVal()) != 0
}
if haslit && hasbyte {
- for _, n2 := range n.List.Slice() {
- if n2.Op == ir.OBYTES2STR {
- n2.Op = ir.OBYTES2STRTMP
+ for _, n2 := range n.List().Slice() {
+ if n2.Op() == ir.OBYTES2STR {
+ n2.SetOp(ir.OBYTES2STRTMP)
}
}
}
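// Background for the threshold above (the runtime's existing contract,
// not new behavior): concatenations of 2..5 operands call the direct
// helpers runtime.concatstring2..concatstring5; longer ones call
// runtime.concatstrings with a []string backed by the preallocated
// temporary array created above.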
case ir.OINDEXMAP:
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
needCopy := false
if !n.IndexMapLValue() {
// can not be changed before the map index by forcing
// the map index to happen immediately following the
// conversions. See copyExpr a few lines below.
- needCopy = mapKeyReplaceStrConv(n.Right)
+ needCopy = mapKeyReplaceStrConv(n.Right())
if instrumenting {
// Race detector needs the copy so it can
}
// key must be addressable
- n.Right = o.mapKeyTemp(n.Left.Type, n.Right)
+ n.SetRight(o.mapKeyTemp(n.Left().Type(), n.Right()))
if needCopy {
- n = o.copyExpr(n, n.Type, false)
+ n = o.copyExpr(n, n.Type(), false)
}
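// The pattern this serves (issue 3512), in source terms (m and b are
// hypothetical):
//
//	v := m[string(b)]   // b is a []byte
//
// With the conversion marked OBYTES2STRTMP the lookup reads b's bytes
// directly; the copy above is only forced when the temporary string
// could otherwise be invalidated before the map access, or when
// instrumenting for the race detector.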
// concrete type (not interface) argument might need an addressable
// temporary to pass to the runtime conversion routine.
case ir.OCONVIFACE:
- n.Left = o.expr(n.Left, nil)
- if n.Left.Type.IsInterface() {
+ n.SetLeft(o.expr(n.Left(), nil))
+ if n.Left().Type().IsInterface() {
break
}
- if _, needsaddr := convFuncName(n.Left.Type, n.Type); needsaddr || isStaticCompositeLiteral(n.Left) {
+ if _, needsaddr := convFuncName(n.Left().Type(), n.Type()); needsaddr || isStaticCompositeLiteral(n.Left()) {
// Need a temp if we need to pass the address to the conversion function.
// We also process static composite literal node here, making a named static global
// whose address we can put directly in an interface (see OCONVIFACE case in walk).
- n.Left = o.addrTemp(n.Left)
+ n.SetLeft(o.addrTemp(n.Left()))
}
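// Source shape this covers (a sketch; S is a hypothetical struct type):
//
//	var s S
//	var i interface{} = s   // OCONVIFACE
//
// Conversion helpers that take the value's address (and static composite
// literals, which become named readonly globals) need s in an
// addressable temporary rather than an arbitrary expression.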
case ir.OCONVNOP:
- if n.Type.IsKind(types.TUNSAFEPTR) && n.Left.Type.IsKind(types.TUINTPTR) && (n.Left.Op == ir.OCALLFUNC || n.Left.Op == ir.OCALLINTER || n.Left.Op == ir.OCALLMETH) {
+ if n.Type().IsKind(types.TUNSAFEPTR) && n.Left().Type().IsKind(types.TUINTPTR) && (n.Left().Op() == ir.OCALLFUNC || n.Left().Op() == ir.OCALLINTER || n.Left().Op() == ir.OCALLMETH) {
// When reordering unsafe.Pointer(f()) into a separate
// statement, the conversion and function call must stay
// together. See golang.org/issue/15329.
- o.init(n.Left)
- o.call(n.Left)
- if lhs == nil || lhs.Op != ir.ONAME || instrumenting {
- n = o.copyExpr(n, n.Type, false)
+ o.init(n.Left())
+ o.call(n.Left())
+ if lhs == nil || lhs.Op() != ir.ONAME || instrumenting {
+ n = o.copyExpr(n, n.Type(), false)
}
} else {
- n.Left = o.expr(n.Left, nil)
+ n.SetLeft(o.expr(n.Left(), nil))
}
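// The shape that must not be split (golang.org/issue/15329), in source
// terms (f is a hypothetical func returning uintptr):
//
//	p := unsafe.Pointer(f())
//
// Ordering the call into a separate statement would leave a bare uintptr
// live across a statement boundary, invisible to the garbage collector;
// keeping the call and the conversion together avoids that.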
case ir.OANDAND, ir.OOROR:
// }
// ... = r
- r := o.newTemp(n.Type, false)
+ r := o.newTemp(n.Type(), false)
// Evaluate left-hand side.
- lhs := o.expr(n.Left, nil)
+ lhs := o.expr(n.Left(), nil)
o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, lhs), ctxStmt))
// Evaluate right-hand side, save generated code.
o.out = nil
t := o.markTemp()
o.edge()
- rhs := o.expr(n.Right, nil)
+ rhs := o.expr(n.Right(), nil)
o.out = append(o.out, typecheck(ir.Nod(ir.OAS, r, rhs), ctxStmt))
o.cleanTemp(t)
gen := o.out
// If left-hand side doesn't cause a short-circuit, issue right-hand side.
nif := ir.Nod(ir.OIF, r, nil)
- if n.Op == ir.OANDAND {
- nif.Nbody.Set(gen)
+ if n.Op() == ir.OANDAND {
+ nif.PtrBody().Set(gen)
} else {
- nif.Rlist.Set(gen)
+ nif.PtrRlist().Set(gen)
}
o.out = append(o.out, nif)
n = r
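// In source terms, the rewrite built above (a sketch; x, f, z are
// hypothetical):
//
//	z = x && f()
//
// becomes
//
//	r := x
//	if r {
//		r = f()   // gen: the saved right-hand-side code
//	}
//	z = r
//
// For ||, gen goes into the else branch (Rlist) instead.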
if isRuneCount(n) {
// len([]rune(s)) is rewritten to runtime.countrunes(s) later.
- n.Left.Left = o.expr(n.Left.Left, nil)
+ n.Left().SetLeft(o.expr(n.Left().Left(), nil))
} else {
o.call(n)
}
- if lhs == nil || lhs.Op != ir.ONAME || instrumenting {
- n = o.copyExpr(n, n.Type, false)
+ if lhs == nil || lhs.Op() != ir.ONAME || instrumenting {
+ n = o.copyExpr(n, n.Type(), false)
}
case ir.OAPPEND:
// Check for append(x, make([]T, y)...) .
if isAppendOfMake(n) {
- n.List.SetFirst(o.expr(n.List.First(), nil)) // order x
- n.List.Second().Left = o.expr(n.List.Second().Left, nil) // order y
+ n.List().SetFirst(o.expr(n.List().First(), nil)) // order x
+ n.List().Second().SetLeft(o.expr(n.List().Second().Left(), nil)) // order y
} else {
- o.exprList(n.List)
+ o.exprList(n.List())
}
- if lhs == nil || lhs.Op != ir.ONAME && !samesafeexpr(lhs, n.List.First()) {
- n = o.copyExpr(n, n.Type, false)
+ if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.List().First()) {
+ n = o.copyExpr(n, n.Type(), false)
}
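// The special-cased shape, in source terms (a sketch):
//
//	s = append(s, make([]T, y)...)
//
// Only s (x above) and the make length y are ordered here; walk later
// expands this form into a direct slice extension without materializing
// the intermediate make'd slice.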
case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
- n.Left = o.expr(n.Left, nil)
+ n.SetLeft(o.expr(n.Left(), nil))
low, high, max := n.SliceBounds()
low = o.expr(low, nil)
low = o.cheapExpr(low)
high = o.expr(high, nil)
high = o.cheapExpr(high)
max = o.expr(max, nil)
max = o.cheapExpr(max)
n.SetSliceBounds(low, high, max)
- if lhs == nil || lhs.Op != ir.ONAME && !samesafeexpr(lhs, n.Left) {
- n = o.copyExpr(n, n.Type, false)
+ if lhs == nil || lhs.Op() != ir.ONAME && !samesafeexpr(lhs, n.Left()) {
+ n = o.copyExpr(n, n.Type(), false)
}
case ir.OCLOSURE:
- if n.Transient() && n.Func.ClosureVars.Len() > 0 {
+ if n.Transient() && n.Func().ClosureVars.Len() > 0 {
prealloc[n] = o.newTemp(closureType(n), false)
}
case ir.OSLICELIT, ir.OCALLPART:
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
- o.exprList(n.List)
- o.exprList(n.Rlist)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
+ o.exprList(n.List())
+ o.exprList(n.Rlist())
if n.Transient() {
var t *types.Type
- switch n.Op {
+ switch n.Op() {
case ir.OSLICELIT:
- t = types.NewArray(n.Type.Elem(), n.Right.Int64Val())
+ t = types.NewArray(n.Type().Elem(), n.Right().Int64Val())
case ir.OCALLPART:
t = partialCallType(n)
}
}
case ir.ODOTTYPE, ir.ODOTTYPE2:
- n.Left = o.expr(n.Left, nil)
- if !isdirectiface(n.Type) || instrumenting {
- n = o.copyExpr(n, n.Type, true)
+ n.SetLeft(o.expr(n.Left(), nil))
+ if !isdirectiface(n.Type()) || instrumenting {
+ n = o.copyExpr(n, n.Type(), true)
}
case ir.ORECV:
- n.Left = o.expr(n.Left, nil)
- n = o.copyExpr(n, n.Type, true)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n = o.copyExpr(n, n.Type(), true)
case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
- n.Left = o.expr(n.Left, nil)
- n.Right = o.expr(n.Right, nil)
+ n.SetLeft(o.expr(n.Left(), nil))
+ n.SetRight(o.expr(n.Right(), nil))
- t := n.Left.Type
+ t := n.Left().Type()
switch {
case t.IsString():
// Mark string(byteSlice) arguments to reuse byteSlice backing
// buffer during conversion. String comparison does not
// memorize the strings for later use, so it is safe.
- if n.Left.Op == ir.OBYTES2STR {
- n.Left.Op = ir.OBYTES2STRTMP
+ if n.Left().Op() == ir.OBYTES2STR {
+ n.Left().SetOp(ir.OBYTES2STRTMP)
}
- if n.Right.Op == ir.OBYTES2STR {
- n.Right.Op = ir.OBYTES2STRTMP
+ if n.Right().Op() == ir.OBYTES2STR {
+ n.Right().SetOp(ir.OBYTES2STRTMP)
}
case t.IsStruct() || t.IsArray():
// for complex comparisons, we need both args to be
// addressable so we can pass them to the runtime.
- n.Left = o.addrTemp(n.Left)
- n.Right = o.addrTemp(n.Right)
+ n.SetLeft(o.addrTemp(n.Left()))
+ n.SetRight(o.addrTemp(n.Right()))
}
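// Standalone illustration (sketch): thanks to the OBYTES2STRTMP marking
// above, a []byte-to-string conversion used only as a comparison operand
// can reuse the slice's backing store instead of copying it.
package main

import "fmt"

func main() {
	b := []byte("hello")
	s := "hello"
	// The comparison reads the bytes immediately and does not retain
	// the converted string, so no copy of b is needed.
	fmt.Println(string(b) == s) // true
}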
case ir.OMAPLIT:
// Order map by converting:
//   map[int]int{a(): b(), c(): d()}
// to
//   m := map[int]int{}
//   m[a()] = b()
//   m[c()] = d()
// Then order the result.
// Without this special case, order would otherwise compute all
// the keys and values before storing any of them to the map.
// See issue 26552.
- entries := n.List.Slice()
+ entries := n.List().Slice()
statics := entries[:0]
var dynamics []*ir.Node
for _, r := range entries {
- if r.Op != ir.OKEY {
+ if r.Op() != ir.OKEY {
base.Fatalf("OMAPLIT entry not OKEY: %v\n", r)
}
- if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
+ if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
dynamics = append(dynamics, r)
continue
}
// Recursively ordering some static entries can change them to dynamic;
// e.g., OCONVIFACE nodes. See #31777.
r = o.expr(r, nil)
- if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
+ if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
dynamics = append(dynamics, r)
continue
}
statics = append(statics, r)
}
- n.List.Set(statics)
+ n.PtrList().Set(statics)
if len(dynamics) == 0 {
break
}
// Emit the creation of the map (with all its static entries).
- m := o.newTemp(n.Type, false)
+ m := o.newTemp(n.Type(), false)
as := ir.Nod(ir.OAS, m, n)
typecheck(as, ctxStmt)
o.stmt(as)
// Emit eval+insert of dynamic entries, one at a time.
for _, r := range dynamics {
- as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, n, r.Left), r.Right)
+ as := ir.Nod(ir.OAS, ir.Nod(ir.OINDEX, n, r.Left()), r.Right())
typecheck(as, ctxStmt) // Note: this converts the OINDEX to an OINDEXMAP
o.stmt(as)
}
n = m
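// Standalone illustration (sketch): with the OMAPLIT rewrite above, each
// dynamic key/value pair is evaluated and stored in order, rather than
// all keys and values being computed before any insertion (issue 26552).
package main

import "fmt"

var trace []string

func v(name string, x int) int { trace = append(trace, name); return x }

func main() {
	m := map[int]int{v("k1", 1): v("v1", 10), v("k2", 2): v("v2", 20)}
	fmt.Println(m)     // map[1:10 2:20]
	fmt.Println(trace) // [k1 v1 k2 v2]
}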
// okas creates and returns an assignment of val to ok,
// including an explicit conversion if necessary.
func okas(ok, val *ir.Node) *ir.Node {
if !ir.IsBlank(ok) {
- val = conv(val, ok.Type)
+ val = conv(val, ok.Type())
}
return ir.Nod(ir.OAS, ok, val)
}
func (o *Order) as2(n *ir.Node) {
tmplist := []*ir.Node{}
left := []*ir.Node{}
- for ni, l := range n.List.Slice() {
+ for ni, l := range n.List().Slice() {
if !ir.IsBlank(l) {
- tmp := o.newTemp(l.Type, l.Type.HasPointers())
- n.List.SetIndex(ni, tmp)
+ tmp := o.newTemp(l.Type(), l.Type().HasPointers())
+ n.List().SetIndex(ni, tmp)
tmplist = append(tmplist, tmp)
left = append(left, l)
}
}
o.out = append(o.out, n)
as := ir.Nod(ir.OAS2, nil, nil)
- as.List.Set(left)
- as.Rlist.Set(tmplist)
+ as.PtrList().Set(left)
+ as.PtrRlist().Set(tmplist)
as = typecheck(as, ctxStmt)
o.stmt(as)
}
// Just like as2, this also adds temporaries to ensure left-to-right assignment.
func (o *Order) okAs2(n *ir.Node) {
var tmp1, tmp2 *ir.Node
- if !ir.IsBlank(n.List.First()) {
- typ := n.Right.Type
+ if !ir.IsBlank(n.List().First()) {
+ typ := n.Right().Type()
tmp1 = o.newTemp(typ, typ.HasPointers())
}
- if !ir.IsBlank(n.List.Second()) {
+ if !ir.IsBlank(n.List().Second()) {
tmp2 = o.newTemp(types.Types[types.TBOOL], false)
}
o.out = append(o.out, n)
if tmp1 != nil {
- r := ir.Nod(ir.OAS, n.List.First(), tmp1)
+ r := ir.Nod(ir.OAS, n.List().First(), tmp1)
r = typecheck(r, ctxStmt)
o.mapAssign(r)
- n.List.SetFirst(tmp1)
+ n.List().SetFirst(tmp1)
}
if tmp2 != nil {
- r := okas(n.List.Second(), tmp2)
+ r := okas(n.List().Second(), tmp2)
r = typecheck(r, ctxStmt)
o.mapAssign(r)
- n.List.SetSecond(tmp2)
+ n.List().SetSecond(tmp2)
}
}
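// Sketch of okAs2 above (illustrative pseudo-source, not compiler code):
//
//	v, ok = m[k]
//
// becomes, with temporaries ensuring left-to-right assignment,
//
//	tmp1, tmp2 = m[k]
//	v = tmp1
//	ok = tmp2   // via okas, converting to ok's type if needed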
)
func emitptrargsmap(fn *ir.Node) {
- if ir.FuncName(fn) == "_" || fn.Func.Nname.Sym.Linkname != "" {
+ if ir.FuncName(fn) == "_" || fn.Func().Nname.Sym().Linkname != "" {
return
}
- lsym := base.Ctxt.Lookup(fn.Func.LSym.Name + ".args_stackmap")
+ lsym := base.Ctxt.Lookup(fn.Func().LSym.Name + ".args_stackmap")
- nptr := int(fn.Type.ArgWidth() / int64(Widthptr))
+ nptr := int(fn.Type().ArgWidth() / int64(Widthptr))
bv := bvalloc(int32(nptr) * 2)
nbitmap := 1
- if fn.Type.NumResults() > 0 {
+ if fn.Type().NumResults() > 0 {
nbitmap = 2
}
off := duint32(lsym, 0, uint32(nbitmap))
off = duint32(lsym, off, uint32(bv.n))
if ir.IsMethod(fn) {
- onebitwalktype1(fn.Type.Recvs(), 0, bv)
+ onebitwalktype1(fn.Type().Recvs(), 0, bv)
}
- if fn.Type.NumParams() > 0 {
- onebitwalktype1(fn.Type.Params(), 0, bv)
+ if fn.Type().NumParams() > 0 {
+ onebitwalktype1(fn.Type().Params(), 0, bv)
}
off = dbvec(lsym, off, bv)
- if fn.Type.NumResults() > 0 {
- onebitwalktype1(fn.Type.Results(), 0, bv)
+ if fn.Type().NumResults() > 0 {
+ onebitwalktype1(fn.Type().Results(), 0, bv)
off = dbvec(lsym, off, bv)
}
}
// cmpstackvarlt reports whether the stack variable a sorts before b.
func cmpstackvarlt(a, b *ir.Node) bool {
if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) {
return b.Class() == ir.PAUTO
}
if a.Class() != ir.PAUTO {
- return a.Xoffset < b.Xoffset
+ return a.Offset() < b.Offset()
}
- if a.Name.Used() != b.Name.Used() {
- return a.Name.Used()
+ if a.Name().Used() != b.Name().Used() {
+ return a.Name().Used()
}
- ap := a.Type.HasPointers()
- bp := b.Type.HasPointers()
+ ap := a.Type().HasPointers()
+ bp := b.Type().HasPointers()
if ap != bp {
return ap
}
- ap = a.Name.Needzero()
- bp = b.Name.Needzero()
+ ap = a.Name().Needzero()
+ bp = b.Name().Needzero()
if ap != bp {
return ap
}
- if a.Type.Width != b.Type.Width {
- return a.Type.Width > b.Type.Width
+ if a.Type().Width != b.Type().Width {
+ return a.Type().Width > b.Type().Width
}
- return a.Sym.Name < b.Sym.Name
+ return a.Sym().Name < b.Sym().Name
}
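// A toy mirror of the comparator above (hedged sketch; stackVar and its
// fields are hypothetical stand-ins for *ir.Node state). Frame layout
// puts used, pointer-bearing, needs-zero, and larger variables first,
// which keeps stkptrsize small and lets zeroing runs be merged.
package main

import (
	"fmt"
	"sort"
)

type stackVar struct {
	name     string
	width    int64
	hasPtr   bool
	needZero bool
	used     bool
}

func less(a, b stackVar) bool {
	if a.used != b.used {
		return a.used // used before unused
	}
	if a.hasPtr != b.hasPtr {
		return a.hasPtr // pointer-bearing before pointer-free
	}
	if a.needZero != b.needZero {
		return a.needZero // needs-zero before the rest
	}
	if a.width != b.width {
		return a.width > b.width // larger first
	}
	return a.name < b.name // finally, by name
}

func main() {
	vars := []stackVar{
		{name: "x", width: 8, used: true},
		{name: "p", width: 8, hasPtr: true, needZero: true, used: true},
		{name: "dead", width: 4},
	}
	sort.Slice(vars, func(i, j int) bool { return less(vars[i], vars[j]) })
	fmt.Println(vars) // p, then x, then dead
}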
// byStackVar implements sort.Interface for []*Node using cmpstackvarlt.
type byStackVar []*ir.Node

func (s byStackVar) Len() int           { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s *ssafn) AllocFrame(f *ssa.Func) {
s.stksize = 0
s.stkptrsize = 0
- fn := s.curfn.Func
+ fn := s.curfn.Func()
// Mark the PAUTO's unused.
for _, ln := range fn.Dcl {
if ln.Class() == ir.PAUTO {
- ln.Name.SetUsed(false)
+ ln.Name().SetUsed(false)
}
}
for _, l := range f.RegAlloc {
if ls, ok := l.(ssa.LocalSlot); ok {
- ls.N.Name.SetUsed(true)
+ ls.N.Name().SetUsed(true)
}
}
case ir.PPARAM, ir.PPARAMOUT:
// Don't modify nodfp; it is a global.
if n != nodfp {
- n.Name.SetUsed(true)
+ n.Name().SetUsed(true)
}
case ir.PAUTO:
- n.Name.SetUsed(true)
+ n.Name().SetUsed(true)
}
}
if !scratchUsed {
scratchUsed = v.Op.UsesScratch()
}
sort.Sort(byStackVar(fn.Dcl))
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
for i, n := range fn.Dcl {
- if n.Op != ir.ONAME || n.Class() != ir.PAUTO {
+ if n.Op() != ir.ONAME || n.Class() != ir.PAUTO {
continue
}
- if !n.Name.Used() {
+ if !n.Name().Used() {
fn.Dcl = fn.Dcl[:i]
break
}
- dowidth(n.Type)
- w := n.Type.Width
+ dowidth(n.Type())
+ w := n.Type().Width
if w >= thearch.MAXWIDTH || w < 0 {
base.Fatalf("bad width")
}
if w == 0 && lastHasPtr {
// Pad between a pointer-containing object and a zero-sized object.
// This prevents a pointer to the zero-sized object from being
// interpreted as a pointer to the pointer-containing object (and
// causing it to be scanned when it shouldn't). See issue 24993.
w = 1
}
s.stksize += w
- s.stksize = Rnd(s.stksize, int64(n.Type.Align))
- if n.Type.HasPointers() {
+ s.stksize = Rnd(s.stksize, int64(n.Type().Align))
+ if n.Type().HasPointers() {
s.stkptrsize = s.stksize
lastHasPtr = true
} else {
lastHasPtr = false
}
if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
s.stksize = Rnd(s.stksize, int64(Widthptr))
}
- n.Xoffset = -s.stksize
+ n.SetOffset(-s.stksize)
}
s.stksize = Rnd(s.stksize, int64(Widthreg))
s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
func funccompile(fn *ir.Node) {
if Curfn != nil {
- base.Fatalf("funccompile %v inside %v", fn.Func.Nname.Sym, Curfn.Func.Nname.Sym)
+ base.Fatalf("funccompile %v inside %v", fn.Func().Nname.Sym(), Curfn.Func().Nname.Sym())
}
- if fn.Type == nil {
+ if fn.Type() == nil {
if base.Errors() == 0 {
base.Fatalf("funccompile missing type")
}
return
}
// assign parameter offsets
- dowidth(fn.Type)
+ dowidth(fn.Type())
- if fn.Nbody.Len() == 0 {
+ if fn.Body().Len() == 0 {
// Initialize ABI wrappers if necessary.
- initLSym(fn.Func, false)
+ initLSym(fn.Func(), false)
emitptrargsmap(fn)
return
}
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in markTypeUsedInInterface).
- initLSym(fn.Func, true)
+ initLSym(fn.Func(), true)
walk(fn)
if base.Errors() > errorsBefore {
return
}
// Make sure type syms are declared for all types that might
// be types of stack objects. We need to do this here
// because symbols must be allocated before the parallel
// phase of the compiler.
- for _, n := range fn.Func.Dcl {
+ for _, n := range fn.Func().Dcl {
switch n.Class() {
case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
- if livenessShouldTrack(n) && n.Name.Addrtaken() {
- dtypesym(n.Type)
+ if livenessShouldTrack(n) && n.Name().Addrtaken() {
+ dtypesym(n.Type())
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
- if fn.Func.LSym.Func().StackObjects == nil {
- fn.Func.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.Func.LSym.Name + ".stkobj")
+ if fn.Func().LSym.Func().StackObjects == nil {
+ fn.Func().LSym.Func().StackObjects = base.Ctxt.Lookup(fn.Func().LSym.Name + ".stkobj")
}
}
}
// isInlinableButNotInlined returns true if fn was an
// inline candidate but then never inlined (presumably because we
// found no call sites).
func isInlinableButNotInlined(fn *ir.Node) bool {
- if fn.Func.Nname.Func.Inl == nil {
+ if fn.Func().Nname.Func().Inl == nil {
return false
}
- if fn.Sym == nil {
+ if fn.Sym() == nil {
return true
}
- return !fn.Sym.Linksym().WasInlined()
+ return !fn.Sym().Linksym().WasInlined()
}
const maxStackSize = 1 << 30
func compileSSA(fn *ir.Node, worker int) {
f := buildssa(fn, worker)
// Note: check arg size to fix issue 25507.
- if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize {
+ if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
largeStackFramesMu.Lock()
- largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos})
+ largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
if pp.Text.To.Offset >= maxStackSize {
largeStackFramesMu.Lock()
locals := f.Frontend().(*ssafn).stksize
- largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos})
+ largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
pp.Flush() // assemble, fill in boilerplate, etc.
// fieldtrack must be called after pp.Flush. See issue 20014.
- fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack)
+ fieldtrack(pp.Text.From.Sym, fn.Func().FieldTrack)
}
func init() {
if race.Enabled {
rand.Seed(time.Now().UnixNano())
}
}
func compileFunctions() {
// Compile the longest functions first,
// since they're most likely to be the slowest.
// This helps avoid stragglers.
sort.Slice(compilequeue, func(i, j int) bool {
- return compilequeue[i].Nbody.Len() > compilequeue[j].Nbody.Len()
+ return compilequeue[i].Body().Len() > compilequeue[j].Body().Len()
})
}
var wg sync.WaitGroup
func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
fn := curfn.(*ir.Node)
- if fn.Func.Nname != nil {
- if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
+ if fn.Func().Nname != nil {
+ if expect := fn.Func().Nname.Sym().Linksym(); fnsym != expect {
base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
}
}
//
// These two adjustments keep toolstash -cmp working for now.
// Deciding the right answer is, as they say, future work.
- isODCLFUNC := fn.Op == ir.ODCLFUNC
+ isODCLFUNC := fn.Op() == ir.ODCLFUNC
var apdecls []*ir.Node
// Populate decls for fn.
if isODCLFUNC {
- for _, n := range fn.Func.Dcl {
- if n.Op != ir.ONAME { // might be OTYPE or OLITERAL
+ for _, n := range fn.Func().Dcl {
+ if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
continue
}
switch n.Class() {
case ir.PAUTO:
- if !n.Name.Used() {
+ if !n.Name().Used() {
// Text == nil -> generating abstract function
if fnsym.Func().Text != nil {
base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
}
}
- decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn.Func, apdecls)
+ decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn.Func(), apdecls)
// For each type referenced by the function's auto vars but not
// already referenced by a dwarf var, attach an R_USETYPE relocation to
// the function symbol to ensure the type is included in DWARF
// processing during linking.
var varScopes []ir.ScopeID
for _, decl := range decls {
pos := declPos(decl)
- varScopes = append(varScopes, findScope(fn.Func.Marks, pos))
+ varScopes = append(varScopes, findScope(fn.Func().Marks, pos))
}
scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
}
func declPos(decl *ir.Node) src.XPos {
- if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) {
+ if decl.Name().Defn != nil && (decl.Name().Captured() || decl.Name().Byval()) {
// It's not clear which position is correct for captured variables here:
// * decl.Pos is the wrong position for captured variables, in the inner
// function, but it is the right position in the outer function.
// case statement.
// This code is probably wrong for type switch variables that are also
// captured.
- return decl.Name.Defn.Pos
+ return decl.Name().Defn.Pos()
}
- return decl.Pos
+ return decl.Pos()
}
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVar(fnsym *obj.LSym, n *ir.Node) *dwarf.Var {
var abbrev int
- offs := n.Xoffset
+ offs := n.Offset()
switch n.Class() {
case ir.PAUTO:
abbrev = dwarf.DW_ABRV_AUTO
case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM
offs += base.Ctxt.FixedFrameSize()
default:
base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
}
- typename := dwarf.InfoPrefix + typesymname(n.Type)
+ typename := dwarf.InfoPrefix + typesymname(n.Type())
delete(fnsym.Func().Autot, ngotype(n).Linksym())
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
- if n.Name.InlFormal() || n.Name.InlLocal() {
- inlIndex = posInlIndex(n.Pos) + 1
- if n.Name.InlFormal() {
+ if n.Name().InlFormal() || n.Name().InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM
}
}
}
declpos := base.Ctxt.InnermostPos(declPos(n))
return &dwarf.Var{
- Name: n.Sym.Name,
+ Name: n.Sym().Name,
IsReturnValue: n.Class() == ir.PPARAMOUT,
- IsInlFormal: n.Name.InlFormal(),
+ IsInlFormal: n.Name().InlFormal(),
Abbrev: abbrev,
StackOffset: int32(offs),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
}
}
if _, found := selected[n]; found {
continue
}
- c := n.Sym.Name[0]
- if c == '.' || n.Type.IsUntyped() {
+ c := n.Sym().Name[0]
+ if c == '.' || n.Type().IsUntyped() {
continue
}
- if n.Class() == ir.PPARAM && !canSSAType(n.Type) {
+ if n.Class() == ir.PPARAM && !canSSAType(n.Type()) {
// SSA-able args get location lists, and may move in and
// out of registers, so those are handled elsewhere.
// Autos and named output params seem to get handled
decls = append(decls, n)
continue
}
- typename := dwarf.InfoPrefix + typesymname(n.Type)
+ typename := dwarf.InfoPrefix + typesymname(n.Type())
decls = append(decls, n)
abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
isReturnValue := (n.Class() == ir.PPARAMOUT)
// misleading location for the param (we want pointer-to-heap
// and not stack).
// TODO(thanm): generate a better location expression
- stackcopy := n.Name.Param.Stackcopy
+ stackcopy := n.Name().Param.Stackcopy
if stackcopy != nil && (stackcopy.Class() == ir.PPARAM || stackcopy.Class() == ir.PPARAMOUT) {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
isReturnValue = (stackcopy.Class() == ir.PPARAMOUT)
}
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
- if n.Name.InlFormal() || n.Name.InlLocal() {
- inlIndex = posInlIndex(n.Pos) + 1
- if n.Name.InlFormal() {
+ if n.Name().InlFormal() || n.Name().InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
- declpos := base.Ctxt.InnermostPos(n.Pos)
+ declpos := base.Ctxt.InnermostPos(n.Pos())
vars = append(vars, &dwarf.Var{
- Name: n.Sym.Name,
+ Name: n.Sym().Name,
IsReturnValue: isReturnValue,
Abbrev: abbrev,
- StackOffset: int32(n.Xoffset),
+ StackOffset: int32(n.Offset()),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
})
func preInliningDcls(fnsym *obj.LSym) []*ir.Node {
fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Node)
var rdcl []*ir.Node
- for _, n := range fn.Func.Inl.Dcl {
- c := n.Sym.Name[0]
+ for _, n := range fn.Func().Inl.Dcl {
+ c := n.Sym().Name[0]
// Avoid reporting "_" parameters, since if there are more than
// one, it can result in a collision later on, as in #23179.
- if unversion(n.Sym.Name) == "_" || c == '.' || n.Type.IsUntyped() {
+ if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
continue
}
rdcl = append(rdcl, n)
case ir.PPARAM, ir.PPARAMOUT:
off += base.Ctxt.FixedFrameSize()
}
- return int32(off + n.Xoffset + slot.Off)
+ return int32(off + n.Offset() + slot.Off)
}
// createComplexVar builds a single DWARF variable entry and location list.
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
- if n.Name.InlFormal() || n.Name.InlLocal() {
- inlIndex = posInlIndex(n.Pos) + 1
- if n.Name.InlFormal() {
+ if n.Name().InlFormal() || n.Name().InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
- declpos := base.Ctxt.InnermostPos(n.Pos)
+ declpos := base.Ctxt.InnermostPos(n.Pos())
dvar := &dwarf.Var{
- Name: n.Sym.Name,
+ Name: n.Sym().Name,
IsReturnValue: n.Class() == ir.PPARAMOUT,
- IsInlFormal: n.Name.InlFormal(),
+ IsInlFormal: n.Name().InlFormal(),
Abbrev: abbrev,
Type: base.Ctxt.Lookup(typename),
// The stack offset is used as a sorting key, so for decomposed
}
func markUsed(n *ir.Node) *ir.Node {
- n.Name.SetUsed(true)
+ n.Name().SetUsed(true)
return n
}
func markNeedZero(n *ir.Node) *ir.Node {
- n.Name.SetNeedzero(true)
+ n.Name().SetNeedzero(true)
return n
}
s = &types.Sym{Name: "."}
}
n := NewName(s)
- n.Type = t
- n.Xoffset = xoffset
+ n.SetType(t)
+ n.SetOffset(xoffset)
n.SetClass(cl)
return n
}
func TestStackvarSort(t *testing.T) {
nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Node {
n := NewName(s)
- n.Type = t
- n.Xoffset = xoffset
+ n.SetType(t)
+ n.SetOffset(xoffset)
n.SetClass(cl)
return n
}
// livenessShouldTrack reports whether the liveness analysis
// should track the variable n.
// We don't care about variables that have no pointers,
// nor do we care about non-local variables,
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
func livenessShouldTrack(n *ir.Node) bool {
- return n.Op == ir.ONAME && (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type.HasPointers()
+ return n.Op() == ir.ONAME && (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers()
}
// getvariables returns the list of on-stack variables that we need to track
// and a map for looking up indices by *Node.
func getvariables(fn *ir.Node) ([]*ir.Node, map[*ir.Node]int32) {
var vars []*ir.Node
- for _, n := range fn.Func.Dcl {
+ for _, n := range fn.Func().Dcl {
if livenessShouldTrack(n) {
vars = append(vars, n)
}
// valueEffects returns the index of a variable in lv.vars and the
// liveness effects v has on that variable.
// If v does not affect any tracked variables, it returns -1, 0.
func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
n, e := affectedNode(v)
- if e == 0 || n == nil || n.Op != ir.ONAME { // cheapest checks first
+ if e == 0 || n == nil || n.Op() != ir.ONAME { // cheapest checks first
return -1, 0
}
// variable" ICEs (issue 19632).
switch v.Op {
case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive:
- if !n.Name.Used() {
+ if !n.Name().Used() {
return -1, 0
}
}
if e&(ssa.SymRead|ssa.SymAddr) != 0 {
effect |= uevar
}
- if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) {
+ if e&ssa.SymWrite != 0 && (!isfat(n.Type()) || v.Op == ssa.OpVarDef) {
effect |= varkill
}
node := vars[i]
switch node.Class() {
case ir.PAUTO:
- onebitwalktype1(node.Type, node.Xoffset+lv.stkptrsize, locals)
+ onebitwalktype1(node.Type(), node.Offset()+lv.stkptrsize, locals)
case ir.PPARAM, ir.PPARAMOUT:
- onebitwalktype1(node.Type, node.Xoffset, args)
+ onebitwalktype1(node.Type(), node.Offset(), args)
}
}
}
// pointers to copy values back to the stack).
// TODO: if the output parameter is heap-allocated, then we
// don't need to keep the stack copy live?
- if lv.fn.Func.HasDefer() {
+ if lv.fn.Func().HasDefer() {
for i, n := range lv.vars {
if n.Class() == ir.PPARAMOUT {
- if n.Name.IsOutputParamHeapAddr() {
+ if n.Name().IsOutputParamHeapAddr() {
// Just to be paranoid. Heap addresses are PAUTOs.
base.Fatalf("variable %v both output param and heap output param", n)
}
- if n.Name.Param.Heapaddr != nil {
+ if n.Name().Param.Heapaddr != nil {
// If this variable moved to the heap, then
// its stack copy is not live.
continue
// Note: zeroing is handled by zeroResults in walk.go.
livedefer.Set(int32(i))
}
- if n.Name.IsOutputParamHeapAddr() {
+ if n.Name().IsOutputParamHeapAddr() {
// This variable will be overwritten early in the function
// prologue (from the result of a mallocgc) but we need to
// zero it in case that malloc causes a stack scan.
- n.Name.SetNeedzero(true)
+ n.Name().SetNeedzero(true)
livedefer.Set(int32(i))
}
- if n.Name.OpenDeferSlot() {
+ if n.Name().OpenDeferSlot() {
// Open-coded defer args slots must be live
// everywhere in a function, since a panic can
// occur (almost) anywhere. Because it is live
// everywhere, it must be zeroed on entry.
livedefer.Set(int32(i))
// It was already marked as Needzero when created.
- if !n.Name.Needzero() {
+ if !n.Name().Needzero() {
base.Fatalf("all pointer-containing defer arg slots should have Needzero set")
}
}
if n.Class() == ir.PPARAM {
continue // ok
}
- base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Func.Nname, n)
+ base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Func().Nname, n)
}
// Record live variables.
}
// If we have an open-coded deferreturn call, make a liveness map for it.
- if lv.fn.Func.OpenCodedDeferDisallowed() {
+ if lv.fn.Func().OpenCodedDeferDisallowed() {
lv.livenessMap.deferreturn = LivenessDontCare
} else {
lv.livenessMap.deferreturn = LivenessIndex{
// input parameters.
for j, n := range lv.vars {
if n.Class() != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) {
- lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func.Nname, n)
+ lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Func().Nname, n)
}
}
}
return
}
- pos := lv.fn.Func.Nname.Pos
+ pos := lv.fn.Func().Nname.Pos()
if v != nil {
pos = v.Pos
}
if !live.Get(int32(i)) {
continue
}
- fmt.Printf("%s%s", comma, n.Sym.Name)
+ fmt.Printf("%s%s", comma, n.Sym().Name)
comma = ","
}
return true
}
fmt.Printf("%s=", name)
if x {
- fmt.Printf("%s", lv.vars[pos].Sym.Name)
+ fmt.Printf("%s", lv.vars[pos].Sym().Name)
}
return true
if b == lv.f.Entry {
live := lv.stackMaps[0]
- fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Func.Nname.Pos))
+ fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Func().Nname.Pos()))
fmt.Printf("\tlive=")
printed = false
for j, n := range lv.vars {
for _, n := range lv.vars {
switch n.Class() {
case ir.PPARAM, ir.PPARAMOUT:
- if maxArgNode == nil || n.Xoffset > maxArgNode.Xoffset {
+ if maxArgNode == nil || n.Offset() > maxArgNode.Offset() {
maxArgNode = n
}
}
// Next, find the offset of the largest pointer in the largest node.
var maxArgs int64
if maxArgNode != nil {
- maxArgs = maxArgNode.Xoffset + typeptrdata(maxArgNode.Type)
+ maxArgs = maxArgNode.Offset() + typeptrdata(maxArgNode.Type())
}
// Size locals bitmaps to be stkptrsize sized.
}
// Emit the live pointer map data structures
- ls := e.curfn.Func.LSym
+ ls := e.curfn.Func().LSym
fninfo := ls.Func()
fninfo.GCArgs, fninfo.GCLocals = lv.emit()
}
func instrument(fn *ir.Node) {
- if fn.Func.Pragma&ir.Norace != 0 {
+ if fn.Func().Pragma&ir.Norace != 0 {
return
}
if !base.Flag.Race || !ispkgin(norace_inst_pkgs) {
- fn.Func.SetInstrumentBody(true)
+ fn.Func().SetInstrumentBody(true)
}
if base.Flag.Race {
lno := base.Pos
base.Pos = src.NoXPos
if thearch.LinkArch.Arch.Family != sys.AMD64 {
- fn.Func.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
- fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
+ fn.Func().Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
+ fn.Func().Exit.Append(mkcall("racefuncexit", nil, nil))
} else {
// nodpc is the PC of the caller as extracted by
// getcallerpc. We use -widthptr(FP) for x86.
// This only works for amd64. This will not
// work on arm or others that might support
// race in the future.
nodpc := ir.Copy(nodfp)
- nodpc.Type = types.Types[types.TUINTPTR]
- nodpc.Xoffset = int64(-Widthptr)
- fn.Func.Dcl = append(fn.Func.Dcl, nodpc)
- fn.Func.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
- fn.Func.Exit.Append(mkcall("racefuncexit", nil, nil))
+ nodpc.SetType(types.Types[types.TUINTPTR])
+ nodpc.SetOffset(int64(-Widthptr))
+ fn.Func().Dcl = append(fn.Func().Dcl, nodpc)
+ fn.Func().Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
+ fn.Func().Exit.Append(mkcall("racefuncexit", nil, nil))
}
base.Pos = lno
}
// second half of dance, the first half being typecheckrangeExpr
n.SetTypecheck(1)
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
if n1.Typecheck() == 0 {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
}
decldepth++
- typecheckslice(n.Nbody.Slice(), ctxStmt)
+ typecheckslice(n.Body().Slice(), ctxStmt)
decldepth--
}
func typecheckrangeExpr(n *ir.Node) {
- n.Right = typecheck(n.Right, ctxExpr)
+ n.SetRight(typecheck(n.Right(), ctxExpr))
- t := n.Right.Type
+ t := n.Right().Type()
if t == nil {
return
}
// delicate little dance. see typecheckas2
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
- if n1.Name == nil || n1.Name.Defn != n {
+ if n1.Name() == nil || n1.Name().Defn != n {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
}
if t.IsPtr() && t.Elem().IsArray() {
t = t.Elem()
}
- n.Type = t
+ n.SetType(t)
var t1, t2 *types.Type
toomany := false
switch t.Etype {
default:
- base.ErrorfAt(n.Pos, "cannot range over %L", n.Right)
+ base.ErrorfAt(n.Pos(), "cannot range over %L", n.Right())
return
case types.TARRAY, types.TSLICE:
t1 = types.Types[types.TINT]
t2 = t.Elem()
case types.TCHAN:
if !t.ChanDir().CanRecv() {
- base.ErrorfAt(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
+ base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.Right(), n.Right().Type())
return
}
t1 = t.Elem()
t2 = nil
- if n.List.Len() == 2 {
+ if n.List().Len() == 2 {
toomany = true
}
case types.TSTRING:
t1 = types.Types[types.TINT]
t2 = types.Runetype
}
- if n.List.Len() > 2 || toomany {
- base.ErrorfAt(n.Pos, "too many variables in range")
+ if n.List().Len() > 2 || toomany {
+ base.ErrorfAt(n.Pos(), "too many variables in range")
}
var v1, v2 *ir.Node
- if n.List.Len() != 0 {
- v1 = n.List.First()
+ if n.List().Len() != 0 {
+ v1 = n.List().First()
}
- if n.List.Len() > 1 {
- v2 = n.List.Second()
+ if n.List().Len() > 1 {
+ v2 = n.List().Second()
}
// this is not only an optimization but also a requirement in the spec.
// present."
if ir.IsBlank(v2) {
if v1 != nil {
- n.List.Set1(v1)
+ n.PtrList().Set1(v1)
}
v2 = nil
}
if v1 != nil {
- if v1.Name != nil && v1.Name.Defn == n {
- v1.Type = t1
- } else if v1.Type != nil {
- if op, why := assignop(t1, v1.Type); op == ir.OXXX {
- base.ErrorfAt(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
+ if v1.Name() != nil && v1.Name().Defn == n {
+ v1.SetType(t1)
+ } else if v1.Type() != nil {
+ if op, why := assignop(t1, v1.Type()); op == ir.OXXX {
+ base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t1, v1, why)
}
}
checkassign(n, v1)
}
if v2 != nil {
- if v2.Name != nil && v2.Name.Defn == n {
- v2.Type = t2
- } else if v2.Type != nil {
- if op, why := assignop(t2, v2.Type); op == ir.OXXX {
- base.ErrorfAt(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
+ if v2.Name() != nil && v2.Name().Defn == n {
+ v2.SetType(t2)
+ } else if v2.Type() != nil {
+ if op, why := assignop(t2, v2.Type()); op == ir.OXXX {
+ base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t2, v2, why)
}
}
checkassign(n, v2)
// the returned node.
func walkrange(n *ir.Node) *ir.Node {
if isMapClear(n) {
- m := n.Right
+ m := n.Right()
lno := setlineno(m)
n = mapClear(m)
base.Pos = lno
// hb: hidden bool
// a, v1, v2: not hidden aggregate, val 1, 2
- t := n.Type
+ t := n.Type()
- a := n.Right
+ a := n.Right()
lno := setlineno(a)
- n.Right = nil
+ n.SetRight(nil)
var v1, v2 *ir.Node
- l := n.List.Len()
+ l := n.List().Len()
if l > 0 {
- v1 = n.List.First()
+ v1 = n.List().First()
}
if l > 1 {
- v2 = n.List.Second()
+ v2 = n.List().Second()
}
if ir.IsBlank(v2) {
v2 = nil
}
// n.List has no meaning anymore, clear it
// to avoid erroneous processing by racewalk.
- n.List.Set(nil)
+ n.PtrList().Set(nil)
var ifGuard *ir.Node
init = append(init, ir.Nod(ir.OAS, hv1, nil))
init = append(init, ir.Nod(ir.OAS, hn, ir.Nod(ir.OLEN, ha, nil)))
- n.Left = ir.Nod(ir.OLT, hv1, hn)
- n.Right = ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1)))
+ n.SetLeft(ir.Nod(ir.OLT, hv1, hn))
+ n.SetRight(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
// for range ha { body }
if v1 == nil {
break
}
// for v1, v2 := range ha { body }
- if cheapComputableIndex(n.Type.Elem().Width) {
+ if cheapComputableIndex(n.Type().Elem().Width) {
// v1, v2 = hv1, ha[hv1]
tmp := ir.Nod(ir.OINDEX, ha, hv1)
tmp.SetBounded(true)
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
a := ir.Nod(ir.OAS2, nil, nil)
- a.List.Set2(v1, v2)
- a.Rlist.Set2(hv1, tmp)
+ a.PtrList().Set2(v1, v2)
+ a.PtrRlist().Set2(hv1, tmp)
body = []*ir.Node{a}
break
}
// elimination on the index variable (see #20711).
// Enhance the prove pass to understand this.
ifGuard = ir.Nod(ir.OIF, nil, nil)
- ifGuard.Left = ir.Nod(ir.OLT, hv1, hn)
+ ifGuard.SetLeft(ir.Nod(ir.OLT, hv1, hn))
translatedLoopOp = ir.OFORUNTIL
- hp := temp(types.NewPtr(n.Type.Elem()))
+ hp := temp(types.NewPtr(n.Type().Elem()))
tmp := ir.Nod(ir.OINDEX, ha, nodintconst(0))
tmp.SetBounded(true)
init = append(init, ir.Nod(ir.OAS, hp, ir.Nod(ir.OADDR, tmp, nil)))
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
a := ir.Nod(ir.OAS2, nil, nil)
- a.List.Set2(v1, v2)
- a.Rlist.Set2(hv1, ir.Nod(ir.ODEREF, hp, nil))
+ a.PtrList().Set2(v1, v2)
+ a.PtrRlist().Set2(hv1, ir.Nod(ir.ODEREF, hp, nil))
body = append(body, a)
// Advance pointer as part of the late increment.
// end of the allocation.
a = ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width))
a = typecheck(a, ctxStmt)
- n.List.Set1(a)
+ n.PtrList().Set1(a)
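// Sketch of the slice/array lowering above (illustrative pseudo-source):
//
//	for v1, v2 := range ha { body }
//
// becomes
//
//	hv1, hn := 0, len(ha)
//	for ; hv1 < hn; hv1++ {
//		v1, v2 = hv1, ha[hv1] // cheap index, or hp pointer advance
//		body
//	}
//
// with the pointer-advance form wrapped in the ifGuard/OFORUNTIL pair
// (see the #20711 note above).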
case types.TMAP:
// order.stmt allocated the iterator for us.
ha := a
hit := prealloc[n]
- th := hit.Type
- n.Left = nil
+ th := hit.Type()
+ n.SetLeft(nil)
keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
elemsym := th.Field(1).Sym // ditto
fn = substArgTypes(fn, t.Key(), t.Elem(), th)
init = append(init, mkcall1(fn, nil, nil, typename(t), ha, ir.Nod(ir.OADDR, hit, nil)))
- n.Left = ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil())
+ n.SetLeft(ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil()))
fn = syslook("mapiternext")
fn = substArgTypes(fn, th)
- n.Right = mkcall1(fn, nil, nil, ir.Nod(ir.OADDR, hit, nil))
+ n.SetRight(mkcall1(fn, nil, nil, ir.Nod(ir.OADDR, hit, nil)))
key := nodSym(ir.ODOT, hit, keysym)
key = ir.Nod(ir.ODEREF, key, nil)
elem := nodSym(ir.ODOT, hit, elemsym)
elem = ir.Nod(ir.ODEREF, elem, nil)
a := ir.Nod(ir.OAS2, nil, nil)
- a.List.Set2(v1, v2)
- a.Rlist.Set2(key, elem)
+ a.PtrList().Set2(v1, v2)
+ a.PtrRlist().Set2(key, elem)
body = []*ir.Node{a}
}
// order.stmt arranged for a copy of the channel variable.
ha := a
- n.Left = nil
+ n.SetLeft(nil)
hv1 := temp(t.Elem())
hv1.SetTypecheck(1)
}
hb := temp(types.Types[types.TBOOL])
- n.Left = ir.Nod(ir.ONE, hb, nodbool(false))
+ n.SetLeft(ir.Nod(ir.ONE, hb, nodbool(false)))
a := ir.Nod(ir.OAS2RECV, nil, nil)
a.SetTypecheck(1)
- a.List.Set2(hv1, hb)
- a.Right = ir.Nod(ir.ORECV, ha, nil)
- n.Left.Ninit.Set1(a)
+ a.PtrList().Set2(hv1, hb)
+ a.SetRight(ir.Nod(ir.ORECV, ha, nil))
+ n.Left().PtrInit().Set1(a)
if v1 == nil {
body = nil
} else {
init = append(init, ir.Nod(ir.OAS, hv1, nil))
// hv1 < len(ha)
- n.Left = ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil))
+ n.SetLeft(ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil)))
if v1 != nil {
// hv1t = hv1
// if hv2 < utf8.RuneSelf
nif := ir.Nod(ir.OIF, nil, nil)
- nif.Left = ir.Nod(ir.OLT, hv2, nodintconst(utf8.RuneSelf))
+ nif.SetLeft(ir.Nod(ir.OLT, hv2, nodintconst(utf8.RuneSelf)))
// hv1++
- nif.Nbody.Set1(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
// } else {
eif := ir.Nod(ir.OAS2, nil, nil)
- nif.Rlist.Set1(eif)
+ nif.PtrRlist().Set1(eif)
// hv2, hv1 = decoderune(ha, hv1)
- eif.List.Set2(hv2, hv1)
+ eif.PtrList().Set2(hv2, hv1)
fn := syslook("decoderune")
- eif.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, ha, hv1))
+ eif.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, ha, hv1))
body = append(body, nif)
if v2 != nil {
// v1, v2 = hv1t, hv2
a := ir.Nod(ir.OAS2, nil, nil)
- a.List.Set2(v1, v2)
- a.Rlist.Set2(hv1t, hv2)
+ a.PtrList().Set2(v1, v2)
+ a.PtrRlist().Set2(hv1t, hv2)
body = append(body, a)
} else {
// v1 = hv1t
}
}
- n.Op = translatedLoopOp
+ n.SetOp(translatedLoopOp)
typecheckslice(init, ctxStmt)
if ifGuard != nil {
- ifGuard.Ninit.Append(init...)
+ ifGuard.PtrInit().Append(init...)
ifGuard = typecheck(ifGuard, ctxStmt)
} else {
- n.Ninit.Append(init...)
+ n.PtrInit().Append(init...)
}
- typecheckslice(n.Left.Ninit.Slice(), ctxStmt)
+ typecheckslice(n.Left().Init().Slice(), ctxStmt)
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- n.Right = typecheck(n.Right, ctxStmt)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ n.SetRight(typecheck(n.Right(), ctxStmt))
typecheckslice(body, ctxStmt)
- n.Nbody.Prepend(body...)
+ n.PtrBody().Prepend(body...)
if ifGuard != nil {
- ifGuard.Nbody.Set1(n)
+ ifGuard.PtrBody().Set1(n)
n = ifGuard
}
return false
}
- if n.Op != ir.ORANGE || n.Type.Etype != types.TMAP || n.List.Len() != 1 {
+ if n.Op() != ir.ORANGE || n.Type().Etype != types.TMAP || n.List().Len() != 1 {
return false
}
- k := n.List.First()
+ k := n.List().First()
if k == nil || ir.IsBlank(k) {
return false
}
// Require k to be a new variable name.
- if k.Name == nil || k.Name.Defn != n {
+ if k.Name() == nil || k.Name().Defn != n {
return false
}
- if n.Nbody.Len() != 1 {
+ if n.Body().Len() != 1 {
return false
}
- stmt := n.Nbody.First() // only stmt in body
- if stmt == nil || stmt.Op != ir.ODELETE {
+ stmt := n.Body().First() // only stmt in body
+ if stmt == nil || stmt.Op() != ir.ODELETE {
return false
}
- m := n.Right
- if !samesafeexpr(stmt.List.First(), m) || !samesafeexpr(stmt.List.Second(), k) {
+ m := n.Right()
+ if !samesafeexpr(stmt.List().First(), m) || !samesafeexpr(stmt.List().Second(), k) {
return false
}
// Keys where equality is not reflexive can not be deleted from maps.
- if !isreflexive(m.Type.Key()) {
+ if !isreflexive(m.Type().Key()) {
return false
}
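// Standalone illustration (runnable): the exact loop shape isMapClear
// accepts above - a single fresh key variable and a lone delete - which
// walk then replaces with one runtime mapclear call.
package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}
	for k := range m {
		delete(m, k)
	}
	fmt.Println(len(m)) // 0
}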
// mapClear constructs a call to runtime.mapclear for the map m.
func mapClear(m *ir.Node) *ir.Node {
- t := m.Type
+ t := m.Type()
// instantiate mapclear(typ *type, hmap map[any]any)
fn := syslook("mapclear")
return false
}
- if n.Nbody.Len() != 1 || n.Nbody.First() == nil {
+ if n.Body().Len() != 1 || n.Body().First() == nil {
return false
}
- stmt := n.Nbody.First() // only stmt in body
- if stmt.Op != ir.OAS || stmt.Left.Op != ir.OINDEX {
+ stmt := n.Body().First() // only stmt in body
+ if stmt.Op() != ir.OAS || stmt.Left().Op() != ir.OINDEX {
return false
}
- if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) {
+ if !samesafeexpr(stmt.Left().Left(), a) || !samesafeexpr(stmt.Left().Right(), v1) {
return false
}
- elemsize := n.Type.Elem().Width
- if elemsize <= 0 || !isZero(stmt.Right) {
+ elemsize := n.Type().Elem().Width
+ if elemsize <= 0 || !isZero(stmt.Right()) {
return false
}
// memclr{NoHeap,Has}Pointers(hp, hn)
// i = len(a) - 1
// }
- n.Op = ir.OIF
+ n.SetOp(ir.OIF)
- n.Nbody.Set(nil)
- n.Left = ir.Nod(ir.ONE, ir.Nod(ir.OLEN, a, nil), nodintconst(0))
+ n.PtrBody().Set(nil)
+ n.SetLeft(ir.Nod(ir.ONE, ir.Nod(ir.OLEN, a, nil), nodintconst(0)))
// hp = &a[0]
hp := temp(types.Types[types.TUNSAFEPTR])
tmp.SetBounded(true)
tmp = ir.Nod(ir.OADDR, tmp, nil)
tmp = convnop(tmp, types.Types[types.TUNSAFEPTR])
- n.Nbody.Append(ir.Nod(ir.OAS, hp, tmp))
+ n.PtrBody().Append(ir.Nod(ir.OAS, hp, tmp))
// hn = len(a) * sizeof(elem(a))
hn := temp(types.Types[types.TUINTPTR])
tmp = ir.Nod(ir.OLEN, a, nil)
tmp = ir.Nod(ir.OMUL, tmp, nodintconst(elemsize))
tmp = conv(tmp, types.Types[types.TUINTPTR])
- n.Nbody.Append(ir.Nod(ir.OAS, hn, tmp))
+ n.PtrBody().Append(ir.Nod(ir.OAS, hn, tmp))
var fn *ir.Node
- if a.Type.Elem().HasPointers() {
+ if a.Type().Elem().HasPointers() {
// memclrHasPointers(hp, hn)
- Curfn.Func.SetWBPos(stmt.Pos)
+ Curfn.Func().SetWBPos(stmt.Pos())
fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
} else {
// memclrNoHeapPointers(hp, hn)
fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
}
- n.Nbody.Append(fn)
+ n.PtrBody().Append(fn)
// i = len(a) - 1
v1 = ir.Nod(ir.OAS, v1, ir.Nod(ir.OSUB, ir.Nod(ir.OLEN, a, nil), nodintconst(1)))
- n.Nbody.Append(v1)
+ n.PtrBody().Append(v1)
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- typecheckslice(n.Nbody.Slice(), ctxStmt)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ typecheckslice(n.Body().Slice(), ctxStmt)
n = walkstmt(n)
return true
}
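// Standalone illustration (runnable): the zeroing loop recognized by the
// arrayClear rewrite above, which becomes a single memclr call plus the
// final "i = len(a) - 1" assignment.
package main

import "fmt"

func main() {
	a := []int{1, 2, 3, 4}
	for i := range a {
		a[i] = 0
	}
	fmt.Println(a) // [0 0 0 0]
}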
// addptr returns (*T)(uintptr(p) + n).
func addptr(p *ir.Node, n int64) *ir.Node {
- t := p.Type
+ t := p.Type()
p = ir.Nod(ir.OCONVNOP, p, nil)
- p.Type = types.Types[types.TUINTPTR]
+ p.SetType(types.Types[types.TUINTPTR])
p = ir.Nod(ir.OADD, p, nodintconst(n))
p = ir.Nod(ir.OCONVNOP, p, nil)
- p.Type = t
+ p.SetType(t)
return p
}
s := typenamesym(t)
if s.Def == nil {
n := ir.NewNameAt(src.NoXPos, s)
- n.Type = types.Types[types.TUINT8]
+ n.SetType(types.Types[types.TUINT8])
n.SetClass(ir.PEXTERN)
n.SetTypecheck(1)
s.Def = ir.AsTypesNode(n)
}
n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil)
- n.Type = types.NewPtr(ir.AsNode(s.Def).Type)
+ n.SetType(types.NewPtr(ir.AsNode(s.Def).Type()))
n.SetTypecheck(1)
return n
}
s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
if s.Def == nil {
n := NewName(s)
- n.Type = types.Types[types.TUINT8]
+ n.SetType(types.Types[types.TUINT8])
n.SetClass(ir.PEXTERN)
n.SetTypecheck(1)
s.Def = ir.AsTypesNode(n)
}
n := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil)
- n.Type = types.NewPtr(ir.AsNode(s.Def).Type)
+ n.SetType(types.NewPtr(ir.AsNode(s.Def).Type()))
n.SetTypecheck(1)
return n
}
func addsignats(dcls []*ir.Node) {
// copy types from dcl list to signatset
for _, n := range dcls {
- if n.Op == ir.OTYPE {
- addsignat(n.Type)
+ if n.Op() == ir.OTYPE {
+ addsignat(n.Type())
}
}
}
s := mappkg.Lookup("zero")
if s.Def == nil {
x := NewName(s)
- x.Type = types.Types[types.TUINT8]
+ x.SetType(types.Types[types.TUINT8])
x.SetClass(ir.PEXTERN)
x.SetTypecheck(1)
s.Def = ir.AsTypesNode(x)
}
z := ir.Nod(ir.OADDR, ir.AsNode(s.Def), nil)
- z.Type = types.NewPtr(types.Types[types.TUINT8])
+ z.SetType(types.NewPtr(types.Types[types.TUINT8]))
z.SetTypecheck(1)
return z
}
v.analyze = analyze
v.nodeID = make(map[*ir.Node]uint32)
for _, n := range list {
- if n.Op == ir.ODCLFUNC && !n.Func.IsHiddenClosure() {
+ if n.Op() == ir.ODCLFUNC && !n.Func().IsHiddenClosure() {
v.visit(n)
}
}
min := v.visitgen
v.stack = append(v.stack, n)
- ir.InspectList(n.Nbody, func(n *ir.Node) bool {
- switch n.Op {
+ ir.InspectList(n.Body(), func(n *ir.Node) bool {
+ switch n.Op() {
case ir.ONAME:
if n.Class() == ir.PFUNC {
- if n != nil && n.Name.Defn != nil {
- if m := v.visit(n.Name.Defn); m < min {
+ if n != nil && n.Name().Defn != nil {
+ if m := v.visit(n.Name().Defn); m < min {
min = m
}
}
}
case ir.OMETHEXPR:
fn := methodExprName(n)
- if fn != nil && fn.Name.Defn != nil {
- if m := v.visit(fn.Name.Defn); m < min {
+ if fn != nil && fn.Name().Defn != nil {
+ if m := v.visit(fn.Name().Defn); m < min {
min = m
}
}
case ir.ODOTMETH:
fn := methodExprName(n)
- if fn != nil && fn.Op == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name.Defn != nil {
- if m := v.visit(fn.Name.Defn); m < min {
+ if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
+ if m := v.visit(fn.Name().Defn); m < min {
min = m
}
}
case ir.OCALLPART:
fn := ir.AsNode(callpartMethod(n).Nname)
- if fn != nil && fn.Op == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name.Defn != nil {
- if m := v.visit(fn.Name.Defn); m < min {
+ if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
+ if m := v.visit(fn.Name().Defn); m < min {
min = m
}
}
case ir.OCLOSURE:
- if m := v.visit(n.Func.Decl); m < min {
+ if m := v.visit(n.Func().Decl); m < min {
min = m
}
}
return true
})
- if (min == id || min == id+1) && !n.Func.IsHiddenClosure() {
+ if (min == id || min == id+1) && !n.Func().IsHiddenClosure() {
// This node is the root of a strongly connected component.
// The original min passed to visitcodelist was v.nodeID[n]+1.
func assembleScopes(fnsym *obj.LSym, fn *ir.Node, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
// Initialize the DWARF scope tree based on lexical scopes.
- dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func.Parents))
- for i, parent := range fn.Func.Parents {
+ dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func().Parents))
+ for i, parent := range fn.Func().Parents {
dwarfScopes[i+1].Parent = int32(parent)
}
scopeVariables(dwarfVars, varScopes, dwarfScopes)
- scopePCs(fnsym, fn.Func.Marks, dwarfScopes)
+ scopePCs(fnsym, fn.Func().Marks, dwarfScopes)
return compactScopes(dwarfScopes)
}
func typecheckselect(sel *ir.Node) {
var def *ir.Node
lno := setlineno(sel)
- typecheckslice(sel.Ninit.Slice(), ctxStmt)
- for _, ncase := range sel.List.Slice() {
- if ncase.Op != ir.OCASE {
+ typecheckslice(sel.Init().Slice(), ctxStmt)
+ for _, ncase := range sel.List().Slice() {
+ if ncase.Op() != ir.OCASE {
setlineno(ncase)
- base.Fatalf("typecheckselect %v", ncase.Op)
+ base.Fatalf("typecheckselect %v", ncase.Op())
}
- if ncase.List.Len() == 0 {
+ if ncase.List().Len() == 0 {
// default
if def != nil {
- base.ErrorfAt(ncase.Pos, "multiple defaults in select (first at %v)", ir.Line(def))
+ base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def))
} else {
def = ncase
}
- } else if ncase.List.Len() > 1 {
- base.ErrorfAt(ncase.Pos, "select cases cannot be lists")
+ } else if ncase.List().Len() > 1 {
+ base.ErrorfAt(ncase.Pos(), "select cases cannot be lists")
} else {
- ncase.List.SetFirst(typecheck(ncase.List.First(), ctxStmt))
- n := ncase.List.First()
- ncase.Left = n
- ncase.List.Set(nil)
- switch n.Op {
+ ncase.List().SetFirst(typecheck(ncase.List().First(), ctxStmt))
+ n := ncase.List().First()
+ ncase.SetLeft(n)
+ ncase.PtrList().Set(nil)
+ switch n.Op() {
default:
- pos := n.Pos
- if n.Op == ir.ONAME {
+ pos := n.Pos()
+ if n.Op() == ir.ONAME {
// We don't have the right position for ONAME nodes (see #15459 and
// others). Using ncase.Pos for now as it will provide the correct
// line number (assuming the expression follows the "case" keyword
// on the same line). This matches the approach before 1.10.
- pos = ncase.Pos
+ pos = ncase.Pos()
}
base.ErrorfAt(pos, "select case must be receive, send or assign recv")
// remove implicit conversions; the eventual assignment
// will reintroduce them.
case ir.OAS:
- if (n.Right.Op == ir.OCONVNOP || n.Right.Op == ir.OCONVIFACE) && n.Right.Implicit() {
- n.Right = n.Right.Left
+ if (n.Right().Op() == ir.OCONVNOP || n.Right().Op() == ir.OCONVIFACE) && n.Right().Implicit() {
+ n.SetRight(n.Right().Left())
}
- if n.Right.Op != ir.ORECV {
- base.ErrorfAt(n.Pos, "select assignment must have receive on right hand side")
+ if n.Right().Op() != ir.ORECV {
+ base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
break
}
- n.Op = ir.OSELRECV
+ n.SetOp(ir.OSELRECV)
// convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
case ir.OAS2RECV:
- if n.Right.Op != ir.ORECV {
- base.ErrorfAt(n.Pos, "select assignment must have receive on right hand side")
+ if n.Right().Op() != ir.ORECV {
+ base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
break
}
- n.Op = ir.OSELRECV2
- n.Left = n.List.First()
- n.List.Set1(n.List.Second())
+ n.SetOp(ir.OSELRECV2)
+ n.SetLeft(n.List().First())
+ n.PtrList().Set1(n.List().Second())
// convert <-c into OSELRECV(N, <-c)
case ir.ORECV:
- n = ir.NodAt(n.Pos, ir.OSELRECV, nil, n)
+ n = ir.NodAt(n.Pos(), ir.OSELRECV, nil, n)
n.SetTypecheck(1)
- ncase.Left = n
+ ncase.SetLeft(n)
case ir.OSEND:
break
}
}
- typecheckslice(ncase.Nbody.Slice(), ctxStmt)
+ typecheckslice(ncase.Body().Slice(), ctxStmt)
}
base.Pos = lno
func walkselect(sel *ir.Node) {
lno := setlineno(sel)
- if sel.Nbody.Len() != 0 {
+ if sel.Body().Len() != 0 {
base.Fatalf("double walkselect")
}
- init := sel.Ninit.Slice()
- sel.Ninit.Set(nil)
+ init := sel.Init().Slice()
+ sel.PtrInit().Set(nil)
- init = append(init, walkselectcases(&sel.List)...)
- sel.List.Set(nil)
+ init = append(init, walkselectcases(sel.PtrList())...)
+ sel.PtrList().Set(nil)
- sel.Nbody.Set(init)
- walkstmtlist(sel.Nbody.Slice())
+ sel.PtrBody().Set(init)
+ walkstmtlist(sel.Body().Slice())
base.Pos = lno
}
if ncas == 1 {
cas := cases.First()
setlineno(cas)
- l := cas.Ninit.Slice()
- if cas.Left != nil { // not default:
- n := cas.Left
- l = append(l, n.Ninit.Slice()...)
- n.Ninit.Set(nil)
- switch n.Op {
+ l := cas.Init().Slice()
+ if cas.Left() != nil { // not default:
+ n := cas.Left()
+ l = append(l, n.Init().Slice()...)
+ n.PtrInit().Set(nil)
+ switch n.Op() {
default:
- base.Fatalf("select %v", n.Op)
+ base.Fatalf("select %v", n.Op())
case ir.OSEND:
// already ok
case ir.OSELRECV, ir.OSELRECV2:
- if n.Op == ir.OSELRECV || n.List.Len() == 0 {
- if n.Left == nil {
- n = n.Right
+ if n.Op() == ir.OSELRECV || n.List().Len() == 0 {
+ if n.Left() == nil {
+ n = n.Right()
} else {
- n.Op = ir.OAS
+ n.SetOp(ir.OAS)
}
break
}
- if n.Left == nil {
+ if n.Left() == nil {
ir.BlankNode = typecheck(ir.BlankNode, ctxExpr|ctxAssign)
- n.Left = ir.BlankNode
+ n.SetLeft(ir.BlankNode)
}
- n.Op = ir.OAS2
- n.List.Prepend(n.Left)
- n.Rlist.Set1(n.Right)
- n.Right = nil
- n.Left = nil
+ n.SetOp(ir.OAS2)
+ n.PtrList().Prepend(n.Left())
+ n.PtrRlist().Set1(n.Right())
+ n.SetRight(nil)
+ n.SetLeft(nil)
n.SetTypecheck(0)
n = typecheck(n, ctxStmt)
}
l = append(l, n)
}
- l = append(l, cas.Nbody.Slice()...)
+ l = append(l, cas.Body().Slice()...)
l = append(l, ir.Nod(ir.OBREAK, nil, nil))
return l
}
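// Sketch of the single-case lowering above (illustrative pseudo-source):
//
//	select {
//	case v := <-c:
//		body
//	}
//
// compiles as if written
//
//	v := <-c
//	body
//
// followed by the generated OBREAK.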
var dflt *ir.Node
for _, cas := range cases.Slice() {
setlineno(cas)
- n := cas.Left
+ n := cas.Left()
if n == nil {
dflt = cas
continue
}
- switch n.Op {
+ switch n.Op() {
case ir.OSEND:
- n.Right = ir.Nod(ir.OADDR, n.Right, nil)
- n.Right = typecheck(n.Right, ctxExpr)
+ n.SetRight(ir.Nod(ir.OADDR, n.Right(), nil))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
case ir.OSELRECV, ir.OSELRECV2:
- if n.Op == ir.OSELRECV2 && n.List.Len() == 0 {
- n.Op = ir.OSELRECV
+ if n.Op() == ir.OSELRECV2 && n.List().Len() == 0 {
+ n.SetOp(ir.OSELRECV)
}
- if n.Left != nil {
- n.Left = ir.Nod(ir.OADDR, n.Left, nil)
- n.Left = typecheck(n.Left, ctxExpr)
+ if n.Left() != nil {
+ n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil))
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
}
}
}
cas := cases.First()
if cas == dflt {
cas = cases.Second()
}
- n := cas.Left
+ n := cas.Left()
setlineno(n)
r := ir.Nod(ir.OIF, nil, nil)
- r.Ninit.Set(cas.Ninit.Slice())
- switch n.Op {
+ r.PtrInit().Set(cas.Init().Slice())
+ switch n.Op() {
default:
- base.Fatalf("select %v", n.Op)
+ base.Fatalf("select %v", n.Op())
case ir.OSEND:
// if selectnbsend(c, v) { body } else { default body }
- ch := n.Left
- r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), types.Types[types.TBOOL], &r.Ninit, ch, n.Right)
+ ch := n.Left()
+ r.SetLeft(mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right()))
case ir.OSELRECV:
// if selectnbrecv(&v, c) { body } else { default body }
- ch := n.Right.Left
- elem := n.Left
+ ch := n.Right().Left()
+ elem := n.Left()
if elem == nil {
elem = nodnil()
}
- r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), types.Types[types.TBOOL], &r.Ninit, elem, ch)
+ r.SetLeft(mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch))
case ir.OSELRECV2:
// if selectnbrecv2(&v, &received, c) { body } else { default body }
- ch := n.Right.Left
- elem := n.Left
+ ch := n.Right().Left()
+ elem := n.Left()
if elem == nil {
elem = nodnil()
}
- receivedp := ir.Nod(ir.OADDR, n.List.First(), nil)
+ receivedp := ir.Nod(ir.OADDR, n.List().First(), nil)
receivedp = typecheck(receivedp, ctxExpr)
- r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), types.Types[types.TBOOL], &r.Ninit, elem, receivedp, ch)
+ r.SetLeft(mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch))
}
- r.Left = typecheck(r.Left, ctxExpr)
- r.Nbody.Set(cas.Nbody.Slice())
- r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...))
+ r.SetLeft(typecheck(r.Left(), ctxExpr))
+ r.PtrBody().Set(cas.Body().Slice())
+ r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...))
return []*ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)}
}
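// Sketch of the two-case-plus-default lowering above (illustrative
// pseudo-source): the select becomes one non-blocking runtime call, e.g.
//
//	select {
//	case c <- v:
//		body
//	default:
//		dflt
//	}
//
// compiles as if written
//
//	if selectnbsend(c, v) {
//		body
//	} else {
//		dflt
//	}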
for _, cas := range cases.Slice() {
setlineno(cas)
- init = append(init, cas.Ninit.Slice()...)
- cas.Ninit.Set(nil)
+ init = append(init, cas.Init().Slice()...)
+ cas.PtrInit().Set(nil)
- n := cas.Left
+ n := cas.Left()
if n == nil { // default:
continue
}
var i int
var c, elem *ir.Node
- switch n.Op {
+ switch n.Op() {
default:
- base.Fatalf("select %v", n.Op)
+ base.Fatalf("select %v", n.Op())
case ir.OSEND:
i = nsends
nsends++
- c = n.Left
- elem = n.Right
+ c = n.Left()
+ elem = n.Right()
case ir.OSELRECV, ir.OSELRECV2:
nrecvs++
i = ncas - nrecvs
- c = n.Right.Left
- elem = n.Left
+ c = n.Right().Left()
+ elem = n.Left()
}
casorder[i] = cas
chosen := temp(types.Types[types.TINT])
recvOK := temp(types.Types[types.TBOOL])
r = ir.Nod(ir.OAS2, nil, nil)
- r.List.Set2(chosen, recvOK)
+ r.PtrList().Set2(chosen, recvOK)
fn := syslook("selectgo")
- r.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
+ r.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
r = typecheck(r, ctxStmt)
init = append(init, r)
r := ir.Nod(ir.OIF, cond, nil)
- if n := cas.Left; n != nil && n.Op == ir.OSELRECV2 {
- x := ir.Nod(ir.OAS, n.List.First(), recvOK)
+ if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 {
+ x := ir.Nod(ir.OAS, n.List().First(), recvOK)
x = typecheck(x, ctxStmt)
- r.Nbody.Append(x)
+ r.PtrBody().Append(x)
}
- r.Nbody.AppendNodes(&cas.Nbody)
- r.Nbody.Append(ir.Nod(ir.OBREAK, nil, nil))
+ r.PtrBody().AppendNodes(cas.PtrBody())
+ r.PtrBody().Append(ir.Nod(ir.OBREAK, nil, nil))
init = append(init, r)
}
// Only worry about simple "l = r" assignments. Multiple
// variable/expression OAS2 assignments have already been
// replaced by multiple simple OAS assignments, and the other
// OAS2* assignments mostly necessitate dynamic execution
// anyway.
- if n.Op != ir.OAS {
+ if n.Op() != ir.OAS {
return false
}
- if ir.IsBlank(n.Left) && candiscard(n.Right) {
+ if ir.IsBlank(n.Left()) && candiscard(n.Right()) {
return true
}
lno := setlineno(n)
defer func() { base.Pos = lno }()
- return s.staticassign(n.Left, n.Right)
+ return s.staticassign(n.Left(), n.Right())
}
// like staticassign but we are copying an already
// initialized value r.
func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool {
- if r.Op != ir.ONAME && r.Op != ir.OMETHEXPR {
+ if r.Op() != ir.ONAME && r.Op() != ir.OMETHEXPR {
return false
}
if r.Class() == ir.PFUNC {
pfuncsym(l, r)
return true
}
- if r.Class() != ir.PEXTERN || r.Sym.Pkg != ir.LocalPkg {
+ if r.Class() != ir.PEXTERN || r.Sym().Pkg != ir.LocalPkg {
return false
}
- if r.Name.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
+ if r.Name().Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
return false
}
- if r.Name.Defn.Op != ir.OAS {
+ if r.Name().Defn.Op() != ir.OAS {
return false
}
- if r.Type.IsString() { // perhaps overwritten by cmd/link -X (#34675)
+ if r.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
return false
}
orig := r
- r = r.Name.Defn.Right
+ r = r.Name().Defn.Right()
- for r.Op == ir.OCONVNOP && !types.Identical(r.Type, l.Type) {
- r = r.Left
+ for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), l.Type()) {
+ r = r.Left()
}
- switch r.Op {
+ switch r.Op() {
case ir.ONAME, ir.OMETHEXPR:
if s.staticcopy(l, r) {
return true
}
// We may have skipped past one or more OCONVNOPs, so
// use conv to ensure r is assignable to l (#13263).
- s.append(ir.Nod(ir.OAS, l, conv(r, l.Type)))
+ s.append(ir.Nod(ir.OAS, l, conv(r, l.Type())))
return true
case ir.ONIL:
return true
case ir.OLITERAL:
if isZero(r) {
return true
}
- litsym(l, r, int(l.Type.Width))
+ litsym(l, r, int(l.Type().Width))
return true
case ir.OADDR:
- if a := r.Left; a.Op == ir.ONAME {
+ if a := r.Left(); a.Op() == ir.ONAME {
addrsym(l, a)
return true
}
case ir.OPTRLIT:
- switch r.Left.Op {
+ switch r.Left().Op() {
case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
// copy pointer
addrsym(l, s.inittemps[r])
return true
}
case ir.OSLICELIT:
// copy slice
a := s.inittemps[r]
- slicesym(l, a, r.Right.Int64Val())
+ slicesym(l, a, r.Right().Int64Val())
return true
case ir.OARRAYLIT, ir.OSTRUCTLIT:
p := s.initplans[r]
n := ir.Copy(l)
for i := range p.E {
e := &p.E[i]
- n.Xoffset = l.Xoffset + e.Xoffset
- n.Type = e.Expr.Type
- if e.Expr.Op == ir.OLITERAL || e.Expr.Op == ir.ONIL {
- litsym(n, e.Expr, int(n.Type.Width))
+ n.SetOffset(l.Offset() + e.Xoffset)
+ n.SetType(e.Expr.Type())
+ if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+ litsym(n, e.Expr, int(n.Type().Width))
continue
}
ll := ir.SepCopy(n)
// Requires computation, but we're
// copying someone else's computation.
rr := ir.SepCopy(orig)
- rr.Type = ll.Type
- rr.Xoffset = rr.Xoffset + e.Xoffset
+ rr.SetType(ll.Type())
+ rr.SetOffset(rr.Offset() + e.Xoffset)
setlineno(rr)
s.append(ir.Nod(ir.OAS, ll, rr))
}
}
func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool {
- for r.Op == ir.OCONVNOP {
- r = r.Left
+ for r.Op() == ir.OCONVNOP {
+ r = r.Left()
}
- switch r.Op {
+ switch r.Op() {
case ir.ONAME, ir.OMETHEXPR:
return s.staticcopy(l, r)
case ir.ONIL:
return true
case ir.OLITERAL:
if isZero(r) {
return true
}
- litsym(l, r, int(l.Type.Width))
+ litsym(l, r, int(l.Type().Width))
return true
case ir.OADDR:
- if nam := stataddr(r.Left); nam != nil {
+ if nam := stataddr(r.Left()); nam != nil {
addrsym(l, nam)
return true
}
fallthrough
case ir.OPTRLIT:
- switch r.Left.Op {
+ switch r.Left().Op() {
case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT:
// Init pointer.
- a := staticname(r.Left.Type)
+ a := staticname(r.Left().Type())
s.inittemps[r] = a
addrsym(l, a)
// Init underlying literal.
- if !s.staticassign(a, r.Left) {
- s.append(ir.Nod(ir.OAS, a, r.Left))
+ if !s.staticassign(a, r.Left()) {
+ s.append(ir.Nod(ir.OAS, a, r.Left()))
}
return true
}
//dump("not static ptrlit", r);
case ir.OSTR2BYTES:
- if l.Class() == ir.PEXTERN && r.Left.Op == ir.OLITERAL {
- sval := r.Left.StringVal()
+ if l.Class() == ir.PEXTERN && r.Left().Op() == ir.OLITERAL {
+ sval := r.Left().StringVal()
slicebytes(l, sval)
return true
}
case ir.OSLICELIT:
s.initplan(r)
// Init slice.
- bound := r.Right.Int64Val()
- ta := types.NewArray(r.Type.Elem(), bound)
+ bound := r.Right().Int64Val()
+ ta := types.NewArray(r.Type().Elem(), bound)
ta.SetNoalg(true)
a := staticname(ta)
s.inittemps[r] = a
p := s.initplans[r]
n := ir.Copy(l)
for i := range p.E {
e := &p.E[i]
- n.Xoffset = l.Xoffset + e.Xoffset
- n.Type = e.Expr.Type
- if e.Expr.Op == ir.OLITERAL || e.Expr.Op == ir.ONIL {
- litsym(n, e.Expr, int(n.Type.Width))
+ n.SetOffset(l.Offset() + e.Xoffset)
+ n.SetType(e.Expr.Type())
+ if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+ litsym(n, e.Expr, int(n.Type().Width))
continue
}
setlineno(e.Expr)
case ir.OCLOSURE:
if hasemptycvars(r) {
if base.Debug.Closure > 0 {
- base.WarnfAt(r.Pos, "closure converted to global")
+ base.WarnfAt(r.Pos(), "closure converted to global")
}
// Closures with no captured variables are globals,
// so the assignment can be done at link time.
- pfuncsym(l, r.Func.Nname)
+ pfuncsym(l, r.Func().Nname)
return true
}
closuredebugruntimecheck(r)
// Determine the underlying concrete type and value we are converting from.
val := r
- for val.Op == ir.OCONVIFACE {
- val = val.Left
+ for val.Op() == ir.OCONVIFACE {
+ val = val.Left()
}
- if val.Type.IsInterface() {
+ if val.Type().IsInterface() {
// val is an interface type.
// If val is nil, we can statically initialize l;
// both words are zero, so there is no work to do; report success.
// If val is non-nil, we have no concrete type to record,
// and we won't be able to statically initialize its value, so report failure.
- return val.Op == ir.ONIL
+ return val.Op() == ir.ONIL
}
- markTypeUsedInInterface(val.Type, l.Sym.Linksym())
+ markTypeUsedInInterface(val.Type(), l.Sym().Linksym())
var itab *ir.Node
- if l.Type.IsEmptyInterface() {
- itab = typename(val.Type)
+ if l.Type().IsEmptyInterface() {
+ itab = typename(val.Type())
} else {
- itab = itabname(val.Type, l.Type)
+ itab = itabname(val.Type(), l.Type())
}
// Create a copy of l to modify while we emit data.
n := ir.Copy(l)
// Emit itab, advance offset.
- addrsym(n, itab.Left) // itab is an OADDR node
- n.Xoffset = n.Xoffset + int64(Widthptr)
+ addrsym(n, itab.Left()) // itab is an OADDR node
+ n.SetOffset(n.Offset() + int64(Widthptr))
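// Sketch of the two-word interface layout being emitted:
//     word 0: *itab, or *_type for an empty interface   (just written)
//     word 1: data pointer, or the value itself if direct (n's new offset)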
// Emit data.
- if isdirectiface(val.Type) {
- if val.Op == ir.ONIL {
+ if isdirectiface(val.Type()) {
+ if val.Op() == ir.ONIL {
// Nil is zero, nothing to do.
return true
}
// Copy val directly into n.
- n.Type = val.Type
+ n.SetType(val.Type())
setlineno(val)
a := ir.SepCopy(n)
if !s.staticassign(a, val) {
}
} else {
// Construct temp to hold val, write pointer to temp into n.
- a := staticname(val.Type)
+ a := staticname(val.Type())
s.inittemps[val] = a
if !s.staticassign(a, val) {
s.append(ir.Nod(ir.OAS, a, val))
n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
statuniqgen++
addvar(n, t, ir.PEXTERN)
- n.Sym.Linksym().Set(obj.AttrLocal, true)
+ n.Sym().Linksym().Set(obj.AttrLocal, true)
return n
}
func readonlystaticname(t *types.Type) *ir.Node {
n := staticname(t)
n.MarkReadonly()
- n.Sym.Linksym().Set(obj.AttrContentAddressable, true)
+ n.Sym().Linksym().Set(obj.AttrContentAddressable, true)
return n
}
func isSimpleName(n *ir.Node) bool {
- return (n.Op == ir.ONAME || n.Op == ir.OMETHEXPR) && n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN
+ return (n.Op() == ir.ONAME || n.Op() == ir.OMETHEXPR) && n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN
}
func litas(l *ir.Node, r *ir.Node, init *ir.Nodes) {
// getdyn calculates the initGenType for n.
// If top is false, getdyn is recursing.
func getdyn(n *ir.Node, top bool) initGenType {
- switch n.Op {
+ switch n.Op() {
default:
if isGoConst(n) {
return initConst
if !top {
return initDynamic
}
- if n.Right.Int64Val()/4 > int64(n.List.Len()) {
+ if n.Right().Int64Val()/4 > int64(n.List().Len()) {
// <25% of entries have explicit values.
// A very rough estimate: it takes 4 bytes of instructions
// to initialize 1 byte of result. So don't use a static
}
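// Worked example (assumed numbers): a literal with bound 100 and 20
// explicit entries gives 100/4 = 25 > 20, i.e. fewer than 25% of the
// entries are explicit, so dynamic initialization is chosen.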
var mode initGenType
- for _, n1 := range n.List.Slice() {
- switch n1.Op {
+ for _, n1 := range n.List().Slice() {
+ switch n1.Op() {
case ir.OKEY:
- n1 = n1.Right
+ n1 = n1.Right()
case ir.OSTRUCTKEY:
- n1 = n1.Left
+ n1 = n1.Left()
}
mode |= getdyn(n1, false)
if mode == initDynamic|initConst {
// isStaticCompositeLiteral reports whether n is a compile-time constant.
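// For example (illustrative): [2]int{1, 2} and struct{ X int }{1} are
// static composites, while []int{1} (OSLICELIT) is not, since its
// backing array must be allocated.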
func isStaticCompositeLiteral(n *ir.Node) bool {
- switch n.Op {
+ switch n.Op() {
case ir.OSLICELIT:
return false
case ir.OARRAYLIT:
- for _, r := range n.List.Slice() {
- if r.Op == ir.OKEY {
- r = r.Right
+ for _, r := range n.List().Slice() {
+ if r.Op() == ir.OKEY {
+ r = r.Right()
}
if !isStaticCompositeLiteral(r) {
return false
}
return true
case ir.OSTRUCTLIT:
- for _, r := range n.List.Slice() {
- if r.Op != ir.OSTRUCTKEY {
+ for _, r := range n.List().Slice() {
+ if r.Op() != ir.OSTRUCTKEY {
base.Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r)
}
- if !isStaticCompositeLiteral(r.Left) {
+ if !isStaticCompositeLiteral(r.Left()) {
return false
}
}
case ir.OCONVIFACE:
// See staticassign's OCONVIFACE case for comments.
val := n
- for val.Op == ir.OCONVIFACE {
- val = val.Left
+ for val.Op() == ir.OCONVIFACE {
+ val = val.Left()
}
- if val.Type.IsInterface() {
- return val.Op == ir.ONIL
+ if val.Type().IsInterface() {
+ return val.Op() == ir.ONIL
}
- if isdirectiface(val.Type) && val.Op == ir.ONIL {
+ if isdirectiface(val.Type()) && val.Op() == ir.ONIL {
return true
}
return isStaticCompositeLiteral(val)
func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
isBlank := var_ == ir.BlankNode
var splitnode func(*ir.Node) (a *ir.Node, value *ir.Node)
- switch n.Op {
+ switch n.Op() {
case ir.OARRAYLIT, ir.OSLICELIT:
var k int64
splitnode = func(r *ir.Node) (*ir.Node, *ir.Node) {
- if r.Op == ir.OKEY {
- k = indexconst(r.Left)
+ if r.Op() == ir.OKEY {
+ k = indexconst(r.Left())
if k < 0 {
- base.Fatalf("fixedlit: invalid index %v", r.Left)
+ base.Fatalf("fixedlit: invalid index %v", r.Left())
}
- r = r.Right
+ r = r.Right()
}
a := ir.Nod(ir.OINDEX, var_, nodintconst(k))
k++
}
case ir.OSTRUCTLIT:
splitnode = func(r *ir.Node) (*ir.Node, *ir.Node) {
- if r.Op != ir.OSTRUCTKEY {
+ if r.Op() != ir.OSTRUCTKEY {
base.Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r)
}
- if r.Sym.IsBlank() || isBlank {
- return ir.BlankNode, r.Left
+ if r.Sym().IsBlank() || isBlank {
+ return ir.BlankNode, r.Left()
}
setlineno(r)
- return nodSym(ir.ODOT, var_, r.Sym), r.Left
+ return nodSym(ir.ODOT, var_, r.Sym()), r.Left()
}
default:
- base.Fatalf("fixedlit bad op: %v", n.Op)
+ base.Fatalf("fixedlit bad op: %v", n.Op())
}
- for _, r := range n.List.Slice() {
+ for _, r := range n.List().Slice() {
a, value := splitnode(r)
if a == ir.BlankNode && candiscard(value) {
continue
}
- switch value.Op {
+ switch value.Op() {
case ir.OSLICELIT:
if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
slicelit(ctxt, value, a, init)
}
func isSmallSliceLit(n *ir.Node) bool {
- if n.Op != ir.OSLICELIT {
+ if n.Op() != ir.OSLICELIT {
return false
}
- r := n.Right
+ r := n.Right()
- return smallintconst(r) && (n.Type.Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type.Elem().Width)
+ return smallintconst(r) && (n.Type().Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type().Elem().Width)
}
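// For instance, assuming smallArrayBytes is 256, a []int64 literal
// counts as small up to 256/8 = 32 elements.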
func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
// make an array type corresponding to the number of elements we have
- t := types.NewArray(n.Type.Elem(), n.Right.Int64Val())
+ t := types.NewArray(n.Type().Elem(), n.Right().Int64Val())
dowidth(t)
if ctxt == inNonInitFunction {
var a *ir.Node
if x := prealloc[n]; x != nil {
// temp allocated during order.go for dddarg
- if !types.Identical(t, x.Type) {
+ if !types.Identical(t, x.Type()) {
panic("dotdotdot base type does not match order's assigned type")
}
}
a = ir.Nod(ir.OADDR, x, nil)
- } else if n.Esc == EscNone {
+ } else if n.Esc() == EscNone {
a = temp(t)
if vstat == nil {
a = ir.Nod(ir.OAS, temp(t), nil)
a = typecheck(a, ctxStmt)
init.Append(a) // zero new temp
- a = a.Left
+ a = a.Left()
} else {
init.Append(ir.Nod(ir.OVARDEF, a, nil))
}
a = ir.Nod(ir.OADDR, a, nil)
} else {
a = ir.Nod(ir.ONEW, nil, nil)
- a.List.Set1(typenod(t))
+ a.PtrList().Set1(typenod(t))
}
a = ir.Nod(ir.OAS, vauto, a)
// put dynamics into array (5)
var index int64
- for _, value := range n.List.Slice() {
- if value.Op == ir.OKEY {
- index = indexconst(value.Left)
+ for _, value := range n.List().Slice() {
+ if value.Op() == ir.OKEY {
+ index = indexconst(value.Left())
if index < 0 {
- base.Fatalf("slicelit: invalid index %v", value.Left)
+ base.Fatalf("slicelit: invalid index %v", value.Left())
}
- value = value.Right
+ value = value.Right()
}
a := ir.Nod(ir.OINDEX, vauto, nodintconst(index))
a.SetBounded(true)
// TODO need to check bounds?
- switch value.Op {
+ switch value.Op() {
case ir.OSLICELIT:
break
func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) {
// make the map var
a := ir.Nod(ir.OMAKE, nil, nil)
- a.Esc = n.Esc
- a.List.Set2(typenod(n.Type), nodintconst(int64(n.List.Len())))
+ a.SetEsc(n.Esc())
+ a.PtrList().Set2(typenod(n.Type()), nodintconst(int64(n.List().Len())))
litas(m, a, init)
- entries := n.List.Slice()
+ entries := n.List().Slice()
// The order pass already removed any dynamic (runtime-computed) entries.
// All remaining entries are static. Double-check that.
for _, r := range entries {
- if !isStaticCompositeLiteral(r.Left) || !isStaticCompositeLiteral(r.Right) {
+ if !isStaticCompositeLiteral(r.Left()) || !isStaticCompositeLiteral(r.Right()) {
base.Fatalf("maplit: entry is not a literal: %v", r)
}
}
// For a large number of entries, put them in an array and loop.
// build types [count]Tindex and [count]Tvalue
- tk := types.NewArray(n.Type.Key(), int64(len(entries)))
- te := types.NewArray(n.Type.Elem(), int64(len(entries)))
+ tk := types.NewArray(n.Type().Key(), int64(len(entries)))
+ te := types.NewArray(n.Type().Elem(), int64(len(entries)))
tk.SetNoalg(true)
te.SetNoalg(true)
datak := ir.Nod(ir.OARRAYLIT, nil, nil)
datae := ir.Nod(ir.OARRAYLIT, nil, nil)
for _, r := range entries {
- datak.List.Append(r.Left)
- datae.List.Append(r.Right)
+ datak.PtrList().Append(r.Left())
+ datae.PtrList().Append(r.Right())
}
fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
fixedlit(inInitFunction, initKindStatic, datae, vstate, init)
body := ir.Nod(ir.OAS, lhs, rhs)
loop := ir.Nod(ir.OFOR, cond, incr)
- loop.Nbody.Set1(body)
- loop.Ninit.Set1(zero)
+ loop.PtrBody().Set1(body)
+ loop.PtrInit().Set1(zero)
loop = typecheck(loop, ctxStmt)
loop = walkstmt(loop)
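// The generated loop is, in effect (sketch; vstatk/vstate hold the
// static key and element arrays):
//     for i := 0; i < len(vstatk); i++ {
//         m[vstatk[i]] = vstate[i]
//     }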
// Build list of var[c] = expr.
// Use temporaries so that mapassign1 can have addressable key, elem.
// TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
- tmpkey := temp(m.Type.Key())
- tmpelem := temp(m.Type.Elem())
+ tmpkey := temp(m.Type().Key())
+ tmpelem := temp(m.Type().Elem())
for _, r := range entries {
- index, elem := r.Left, r.Right
+ index, elem := r.Left(), r.Right()
setlineno(index)
a := ir.Nod(ir.OAS, tmpkey, index)
}
func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
- t := n.Type
- switch n.Op {
+ t := n.Type()
+ switch n.Op() {
default:
- base.Fatalf("anylit: not lit, op=%v node=%v", n.Op, n)
+ base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n)
case ir.ONAME, ir.OMETHEXPR:
a := ir.Nod(ir.OAS, var_, n)
}
var r *ir.Node
- if n.Right != nil {
+ if n.Right() != nil {
// n.Right is a stack temporary used as the backing store.
- init.Append(ir.Nod(ir.OAS, n.Right, nil)) // zero backing store, just in case (#18410)
- r = ir.Nod(ir.OADDR, n.Right, nil)
+ init.Append(ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410)
+ r = ir.Nod(ir.OADDR, n.Right(), nil)
r = typecheck(r, ctxExpr)
} else {
r = ir.Nod(ir.ONEW, nil, nil)
r.SetTypecheck(1)
- r.Type = t
- r.Esc = n.Esc
+ r.SetType(t)
+ r.SetEsc(n.Esc())
}
r = walkexpr(r, init)
var_ = ir.Nod(ir.ODEREF, var_, nil)
var_ = typecheck(var_, ctxExpr|ctxAssign)
- anylit(n.Left, var_, init)
+ anylit(n.Left(), var_, init)
case ir.OSTRUCTLIT, ir.OARRAYLIT:
if !t.IsStruct() && !t.IsArray() {
base.Fatalf("anylit: not struct/array")
}
- if isSimpleName(var_) && n.List.Len() > 4 {
+ if isSimpleName(var_) && n.List().Len() > 4 {
// lay out static data
vstat := readonlystaticname(t)
ctxt := inInitFunction
- if n.Op == ir.OARRAYLIT {
+ if n.Op() == ir.OARRAYLIT {
ctxt = inNonInitFunction
}
fixedlit(ctxt, initKindStatic, n, vstat, init)
}
var components int64
- if n.Op == ir.OARRAYLIT {
+ if n.Op() == ir.OARRAYLIT {
components = t.NumElem()
} else {
components = int64(t.NumFields())
}
// initialization of an array or struct with unspecified components (missing fields or arrays)
- if isSimpleName(var_) || int64(n.List.Len()) < components {
+ if isSimpleName(var_) || int64(n.List().Len()) < components {
a := ir.Nod(ir.OAS, var_, nil)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
}
func oaslit(n *ir.Node, init *ir.Nodes) bool {
- if n.Left == nil || n.Right == nil {
+ if n.Left() == nil || n.Right() == nil {
// not a special composite literal assignment
return false
}
- if n.Left.Type == nil || n.Right.Type == nil {
+ if n.Left().Type() == nil || n.Right().Type() == nil {
// not a special composite literal assignment
return false
}
- if !isSimpleName(n.Left) {
+ if !isSimpleName(n.Left()) {
// not a special composite literal assignment
return false
}
- if !types.Identical(n.Left.Type, n.Right.Type) {
+ if !types.Identical(n.Left().Type(), n.Right().Type()) {
// not a special composite literal assignment
return false
}
- switch n.Right.Op {
+ switch n.Right().Op() {
default:
// not a special composite literal assignment
return false
case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
- if vmatch1(n.Left, n.Right) {
+ if vmatch1(n.Left(), n.Right()) {
// not a special composite literal assignment
return false
}
- anylit(n.Right, n.Left, init)
+ anylit(n.Right(), n.Left(), init)
}
- n.Op = ir.OEMPTY
- n.Right = nil
+ n.SetOp(ir.OEMPTY)
+ n.SetRight(nil)
return true
}
return nil
}
- switch n.Op {
+ switch n.Op() {
case ir.ONAME, ir.OMETHEXPR:
return ir.SepCopy(n)
case ir.ODOT:
- nam := stataddr(n.Left)
+ nam := stataddr(n.Left())
if nam == nil {
break
}
- nam.Xoffset = nam.Xoffset + n.Xoffset
- nam.Type = n.Type
+ nam.SetOffset(nam.Offset() + n.Offset())
+ nam.SetType(n.Type())
return nam
case ir.OINDEX:
- if n.Left.Type.IsSlice() {
+ if n.Left().Type().IsSlice() {
break
}
- nam := stataddr(n.Left)
+ nam := stataddr(n.Left())
if nam == nil {
break
}
- l := getlit(n.Right)
+ l := getlit(n.Right())
if l < 0 {
break
}
// Check for overflow.
- if n.Type.Width != 0 && thearch.MAXWIDTH/n.Type.Width <= int64(l) {
+ if n.Type().Width != 0 && thearch.MAXWIDTH/n.Type().Width <= int64(l) {
break
}
- nam.Xoffset = nam.Xoffset + int64(l)*n.Type.Width
- nam.Type = n.Type
+ nam.SetOffset(nam.Offset() + int64(l)*n.Type().Width)
+ nam.SetType(n.Type())
return nam
}
}
p := new(InitPlan)
s.initplans[n] = p
- switch n.Op {
+ switch n.Op() {
default:
base.Fatalf("initplan")
case ir.OARRAYLIT, ir.OSLICELIT:
var k int64
- for _, a := range n.List.Slice() {
- if a.Op == ir.OKEY {
- k = indexconst(a.Left)
+ for _, a := range n.List().Slice() {
+ if a.Op() == ir.OKEY {
+ k = indexconst(a.Left())
if k < 0 {
- base.Fatalf("initplan arraylit: invalid index %v", a.Left)
+ base.Fatalf("initplan arraylit: invalid index %v", a.Left())
}
- a = a.Right
+ a = a.Right()
}
- s.addvalue(p, k*n.Type.Elem().Width, a)
+ s.addvalue(p, k*n.Type().Elem().Width, a)
k++
}
case ir.OSTRUCTLIT:
- for _, a := range n.List.Slice() {
- if a.Op != ir.OSTRUCTKEY {
+ for _, a := range n.List().Slice() {
+ if a.Op() != ir.OSTRUCTKEY {
base.Fatalf("initplan structlit")
}
- if a.Sym.IsBlank() {
+ if a.Sym().IsBlank() {
continue
}
- s.addvalue(p, a.Xoffset, a.Left)
+ s.addvalue(p, a.Offset(), a.Left())
}
case ir.OMAPLIT:
- for _, a := range n.List.Slice() {
- if a.Op != ir.OKEY {
+ for _, a := range n.List().Slice() {
+ if a.Op() != ir.OKEY {
base.Fatalf("initplan maplit")
}
- s.addvalue(p, -1, a.Right)
+ s.addvalue(p, -1, a.Right())
}
}
}
}
func isZero(n *ir.Node) bool {
- switch n.Op {
+ switch n.Op() {
case ir.ONIL:
return true
}
case ir.OARRAYLIT:
- for _, n1 := range n.List.Slice() {
- if n1.Op == ir.OKEY {
- n1 = n1.Right
+ for _, n1 := range n.List().Slice() {
+ if n1.Op() == ir.OKEY {
+ n1 = n1.Right()
}
if !isZero(n1) {
return false
return true
case ir.OSTRUCTLIT:
- for _, n1 := range n.List.Slice() {
- if !isZero(n1.Left) {
+ for _, n1 := range n.List().Slice() {
+ if !isZero(n1.Left()) {
return false
}
}
}
func isvaluelit(n *ir.Node) bool {
- return n.Op == ir.OARRAYLIT || n.Op == ir.OSTRUCTLIT
+ return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
}
func genAsStatic(as *ir.Node) {
- if as.Left.Type == nil {
+ if as.Left().Type() == nil {
base.Fatalf("genAsStatic as.Left not typechecked")
}
- nam := stataddr(as.Left)
- if nam == nil || (nam.Class() != ir.PEXTERN && as.Left != ir.BlankNode) {
- base.Fatalf("genAsStatic: lhs %v", as.Left)
+ nam := stataddr(as.Left())
+ if nam == nil || (nam.Class() != ir.PEXTERN && as.Left() != ir.BlankNode) {
+ base.Fatalf("genAsStatic: lhs %v", as.Left())
}
switch {
- case as.Right.Op == ir.OLITERAL:
- litsym(nam, as.Right, int(as.Right.Type.Width))
- case (as.Right.Op == ir.ONAME || as.Right.Op == ir.OMETHEXPR) && as.Right.Class() == ir.PFUNC:
- pfuncsym(nam, as.Right)
+ case as.Right().Op() == ir.OLITERAL:
+ litsym(nam, as.Right(), int(as.Right().Type().Width))
+ case (as.Right().Op() == ir.ONAME || as.Right().Op() == ir.OMETHEXPR) && as.Right().Class() == ir.PFUNC:
+ pfuncsym(nam, as.Right())
default:
- base.Fatalf("genAsStatic: rhs %v", as.Right)
+ base.Fatalf("genAsStatic: rhs %v", as.Right())
}
}
// considered as the 0th parameter. This does not include the receiver of an
// interface call.
func getParam(n *ir.Node, i int) *types.Field {
- t := n.Left.Type
- if n.Op == ir.OCALLMETH {
+ t := n.Left().Type()
+ if n.Op() == ir.OCALLMETH {
if i == 0 {
return t.Recv()
}
// - Size of the argument
// - Offset at which the argument should be placed in the args frame when making the call
func (s *state) emitOpenDeferInfo() {
- x := base.Ctxt.Lookup(s.curfn.Func.LSym.Name + ".opendefer")
- s.curfn.Func.LSym.Func().OpenCodedDeferInfo = x
+ x := base.Ctxt.Lookup(s.curfn.Func().LSym.Name + ".opendefer")
+ s.curfn.Func().LSym.Func().OpenCodedDeferInfo = x
off := 0
// Compute maxargsize (max size of arguments for all defers)
var maxargsize int64
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- argsize := r.n.Left.Type.ArgWidth()
+ argsize := r.n.Left().Type().ArgWidth()
if argsize > maxargsize {
maxargsize = argsize
}
}
off = dvarint(x, off, maxargsize)
- off = dvarint(x, off, -s.deferBitsTemp.Xoffset)
+ off = dvarint(x, off, -s.deferBitsTemp.Offset())
off = dvarint(x, off, int64(len(s.openDefers)))
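// Sketch of the varint-encoded record written so far:
//     maxargsize | -deferBitsTemp offset | number of defers | per-defer data...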
// Write in reverse order, for ease of running in that order at runtime
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- off = dvarint(x, off, r.n.Left.Type.ArgWidth())
- off = dvarint(x, off, -r.closureNode.Xoffset)
+ off = dvarint(x, off, r.n.Left().Type().ArgWidth())
+ off = dvarint(x, off, -r.closureNode.Offset())
numArgs := len(r.argNodes)
if r.rcvrNode != nil {
// If there's an interface receiver, treat/place it as the first
}
off = dvarint(x, off, int64(numArgs))
if r.rcvrNode != nil {
- off = dvarint(x, off, -r.rcvrNode.Xoffset)
+ off = dvarint(x, off, -r.rcvrNode.Offset())
off = dvarint(x, off, s.config.PtrSize)
off = dvarint(x, off, 0)
}
for j, arg := range r.argNodes {
f := getParam(r.n, j)
- off = dvarint(x, off, -arg.Xoffset)
+ off = dvarint(x, off, -arg.Offset())
off = dvarint(x, off, f.Type.Size())
off = dvarint(x, off, f.Offset)
}
var astBuf *bytes.Buffer
if printssa {
astBuf = &bytes.Buffer{}
- ir.FDumpList(astBuf, "buildssa-enter", fn.Func.Enter)
- ir.FDumpList(astBuf, "buildssa-body", fn.Nbody)
- ir.FDumpList(astBuf, "buildssa-exit", fn.Func.Exit)
+ ir.FDumpList(astBuf, "buildssa-enter", fn.Func().Enter)
+ ir.FDumpList(astBuf, "buildssa-body", fn.Body())
+ ir.FDumpList(astBuf, "buildssa-exit", fn.Func().Exit)
if ssaDumpStdout {
fmt.Println("generating SSA for", name)
fmt.Print(astBuf.String())
}
var s state
- s.pushLine(fn.Pos)
+ s.pushLine(fn.Pos())
defer s.popLine()
- s.hasdefer = fn.Func.HasDefer()
- if fn.Func.Pragma&ir.CgoUnsafeArgs != 0 {
+ s.hasdefer = fn.Func().HasDefer()
+ if fn.Func().Pragma&ir.CgoUnsafeArgs != 0 {
s.cgoUnsafeArgs = true
}
s.f = ssa.NewFunc(&fe)
s.config = ssaConfig
- s.f.Type = fn.Type
+ s.f.Type = fn.Type()
s.f.Config = ssaConfig
s.f.Cache = &ssaCaches[worker]
s.f.Cache.Reset()
s.f.Name = name
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
s.f.PrintOrHtmlSSA = printssa
- if fn.Func.Pragma&ir.Nosplit != 0 {
+ if fn.Func().Pragma&ir.Nosplit != 0 {
s.f.NoSplit = true
}
s.panics = map[funcLine]*ssa.Block{}
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
- s.f.Entry.Pos = fn.Pos
+ s.f.Entry.Pos = fn.Pos()
if printssa {
ssaDF := ssaDumpFile
s.fwdVars = map[*ir.Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
- s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func.OpenCodedDeferDisallowed()
+ s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func().OpenCodedDeferDisallowed()
switch {
case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
// that we don't track correctly.
s.hasOpenDefers = false
}
- if s.hasOpenDefers && s.curfn.Func.Exit.Len() > 0 {
+ if s.hasOpenDefers && s.curfn.Func().Exit.Len() > 0 {
// Skip doing open defers if there is any extra exit code (likely
// copying heap-allocated return values or race detection), since
// we will not generate that code in the case of the extra
s.hasOpenDefers = false
}
if s.hasOpenDefers &&
- s.curfn.Func.NumReturns*s.curfn.Func.NumDefers > 15 {
+ s.curfn.Func().NumReturns*s.curfn.Func().NumDefers > 15 {
// Since we are generating defer calls at every exit for
// open-coded defers, skip doing open-coded defers if there are
// too many returns (especially if there are multiple defers).
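// e.g. a function with 4 returns and 4 defers (4*4 = 16 > 15) falls back
// to the runtime-managed defer path.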
s.decladdrs = map[*ir.Node]*ssa.Value{}
var args []ssa.Param
var results []ssa.Param
- for _, n := range fn.Func.Dcl {
+ for _, n := range fn.Func().Dcl {
switch n.Class() {
case ir.PPARAM:
- s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
- args = append(args, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+ args = append(args, ssa.Param{Type: n.Type(), Offset: int32(n.Offset())})
case ir.PPARAMOUT:
- s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
- results = append(results, ssa.Param{Type: n.Type, Offset: int32(n.Xoffset)})
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+ results = append(results, ssa.Param{Type: n.Type(), Offset: int32(n.Offset())})
if s.canSSA(n) {
// Save ssa-able PPARAMOUT variables so we can
// store them back to the stack at the end of
}
// Populate SSAable arguments.
- for _, n := range fn.Func.Dcl {
+ for _, n := range fn.Func().Dcl {
if n.Class() == ir.PPARAM && s.canSSA(n) {
- v := s.newValue0A(ssa.OpArg, n.Type, n)
+ v := s.newValue0A(ssa.OpArg, n.Type(), n)
s.vars[n] = v
s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
}
}
// Convert the AST-based IR to the SSA-based IR
- s.stmtList(fn.Func.Enter)
- s.stmtList(fn.Nbody)
+ s.stmtList(fn.Func().Enter)
+ s.stmtList(fn.Body())
// fallthrough to exit
if s.curBlock != nil {
- s.pushLine(fn.Func.Endlineno)
+ s.pushLine(fn.Func().Endlineno)
s.exit()
s.popLine()
}
func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Node) {
// Read sources of target function fn.
- fname := base.Ctxt.PosTable.Pos(fn.Pos).Filename()
- targetFn, err := readFuncLines(fname, fn.Pos.Line(), fn.Func.Endlineno.Line())
+ fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
+ targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Func().Endlineno.Line())
if err != nil {
writer.Logf("cannot read sources for function %v: %v", fn, err)
}
var inlFns []*ssa.FuncLines
for _, fi := range ssaDumpInlined {
var elno src.XPos
- if fi.Name.Defn == nil {
+ if fi.Name().Defn == nil {
// Endlineno is filled from exported data.
- elno = fi.Func.Endlineno
+ elno = fi.Func().Endlineno
} else {
- elno = fi.Name.Defn.Func.Endlineno
+ elno = fi.Name().Defn.Func().Endlineno
}
- fname := base.Ctxt.PosTable.Pos(fi.Pos).Filename()
- fnLines, err := readFuncLines(fname, fi.Pos.Line(), elno.Line())
+ fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
+ fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
if err != nil {
writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
continue
}
func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) {
- if !s.curfn.Func.InstrumentBody() {
+ if !s.curfn.Func().InstrumentBody() {
return
}
// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *ir.Node) {
- if !(n.Op == ir.OVARKILL || n.Op == ir.OVARLIVE || n.Op == ir.OVARDEF) {
+ if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers; doing so would only confuse debugging.
- s.pushLine(n.Pos)
+ s.pushLine(n.Pos())
defer s.popLine()
}
// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
// then this code is dead. Stop here.
- if s.curBlock == nil && n.Op != ir.OLABEL {
+ if s.curBlock == nil && n.Op() != ir.OLABEL {
return
}
- s.stmtList(n.Ninit)
- switch n.Op {
+ s.stmtList(n.Init())
+ switch n.Op() {
case ir.OBLOCK:
- s.stmtList(n.List)
+ s.stmtList(n.List())
// No-ops
case ir.OEMPTY, ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
case ir.OCALLMETH, ir.OCALLINTER:
s.callResult(n, callNormal)
- if n.Op == ir.OCALLFUNC && n.Left.Op == ir.ONAME && n.Left.Class() == ir.PFUNC {
- if fn := n.Left.Sym.Name; base.Flag.CompilingRuntime && fn == "throw" ||
- n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
+ if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && n.Left().Class() == ir.PFUNC {
+ if fn := n.Left().Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
+ n.Left().Sym().Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
m := s.mem()
b := s.endBlock()
b.Kind = ssa.BlockExit
var defertype string
if s.hasOpenDefers {
defertype = "open-coded"
- } else if n.Esc == EscNever {
+ } else if n.Esc() == EscNever {
defertype = "stack-allocated"
} else {
defertype = "heap-allocated"
}
- base.WarnfAt(n.Pos, "%s defer", defertype)
+ base.WarnfAt(n.Pos(), "%s defer", defertype)
}
if s.hasOpenDefers {
- s.openDeferRecord(n.Left)
+ s.openDeferRecord(n.Left())
} else {
d := callDefer
- if n.Esc == EscNever {
+ if n.Esc() == EscNever {
d = callDeferStack
}
- s.callResult(n.Left, d)
+ s.callResult(n.Left(), d)
}
case ir.OGO:
- s.callResult(n.Left, callGo)
+ s.callResult(n.Left(), callGo)
case ir.OAS2DOTTYPE:
- res, resok := s.dottype(n.Right, true)
+ res, resok := s.dottype(n.Right(), true)
deref := false
- if !canSSAType(n.Right.Type) {
+ if !canSSAType(n.Right().Type()) {
if res.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
deref = true
res = res.Args[0]
}
- s.assign(n.List.First(), res, deref, 0)
- s.assign(n.List.Second(), resok, false, 0)
+ s.assign(n.List().First(), res, deref, 0)
+ s.assign(n.List().Second(), resok, false, 0)
return
case ir.OAS2FUNC:
// We come here only when it is an intrinsic call returning two values.
- if !isIntrinsicCall(n.Right) {
- s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right)
- }
- v := s.intrinsicCall(n.Right)
- v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
- v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
- s.assign(n.List.First(), v1, false, 0)
- s.assign(n.List.Second(), v2, false, 0)
+ if !isIntrinsicCall(n.Right()) {
+ s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Right())
+ }
+ v := s.intrinsicCall(n.Right())
+ v1 := s.newValue1(ssa.OpSelect0, n.List().First().Type(), v)
+ v2 := s.newValue1(ssa.OpSelect1, n.List().Second().Type(), v)
+ s.assign(n.List().First(), v1, false, 0)
+ s.assign(n.List().Second(), v2, false, 0)
return
case ir.ODCL:
- if n.Left.Class() == ir.PAUTOHEAP {
+ if n.Left().Class() == ir.PAUTOHEAP {
s.Fatalf("DCL %v", n)
}
case ir.OLABEL:
- sym := n.Sym
+ sym := n.Sym()
lab := s.label(sym)
// Associate label with its control flow node, if any
s.startBlock(lab.target)
case ir.OGOTO:
- sym := n.Sym
+ sym := n.Sym()
lab := s.label(sym)
if lab.target == nil {
b.AddEdgeTo(lab.target)
case ir.OAS:
- if n.Left == n.Right && n.Left.Op == ir.ONAME {
+ if n.Left() == n.Right() && n.Left().Op() == ir.ONAME {
// An x=x assignment. No point in doing anything
// here. In addition, skipping this assignment
// prevents generating:
}
// Evaluate RHS.
- rhs := n.Right
+ rhs := n.Right()
if rhs != nil {
- switch rhs.Op {
+ switch rhs.Op() {
case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// Check whether we're writing the result of an append back to the same slice.
// If so, we handle it specially to avoid write barriers on the fast
// (non-growth) path.
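// e.g. for s = append(s, x): on the non-growth path only the length
// changes, so rewriting the pointer field (a write barrier) can be avoided.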
- if !samesafeexpr(n.Left, rhs.List.First()) || base.Flag.N != 0 {
+ if !samesafeexpr(n.Left(), rhs.List().First()) || base.Flag.N != 0 {
break
}
// If the slice can be SSA'd, it'll be on the stack,
// so there will be no write barriers,
// so there's no need to attempt to prevent them.
- if s.canSSA(n.Left) {
+ if s.canSSA(n.Left()) {
if base.Debug.Append > 0 { // replicating old diagnostic message
- base.WarnfAt(n.Pos, "append: len-only update (in local slice)")
+ base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
}
break
}
if base.Debug.Append > 0 {
- base.WarnfAt(n.Pos, "append: len-only update")
+ base.WarnfAt(n.Pos(), "append: len-only update")
}
s.append(rhs, true)
return
}
}
- if ir.IsBlank(n.Left) {
+ if ir.IsBlank(n.Left()) {
// _ = rhs
// Just evaluate rhs for side-effects.
if rhs != nil {
}
var t *types.Type
- if n.Right != nil {
- t = n.Right.Type
+ if n.Right() != nil {
+ t = n.Right().Type()
} else {
- t = n.Left.Type
+ t = n.Left().Type()
}
var r *ssa.Value
}
var skip skipMask
- if rhs != nil && (rhs.Op == ir.OSLICE || rhs.Op == ir.OSLICE3 || rhs.Op == ir.OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
+ if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && samesafeexpr(rhs.Left(), n.Left()) {
// We're assigning a slicing operation back to its source.
// Don't write back fields we aren't changing. See issue #14855.
i, j, k := rhs.SliceBounds()
- if i != nil && (i.Op == ir.OLITERAL && i.Val().Kind() == constant.Int && i.Int64Val() == 0) {
+ if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && i.Int64Val() == 0) {
// [0:...] is the same as [:...]
i = nil
}
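// e.g. s = s[:n] leaves the pointer (and, for a two-index slice, the
// capacity) unchanged, so those fields can be skipped when storing back.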
}
}
- s.assign(n.Left, r, deref, skip)
+ s.assign(n.Left(), r, deref, skip)
case ir.OIF:
- if ir.IsConst(n.Left, constant.Bool) {
- s.stmtList(n.Left.Ninit)
- if n.Left.BoolVal() {
- s.stmtList(n.Nbody)
+ if ir.IsConst(n.Left(), constant.Bool) {
+ s.stmtList(n.Left().Init())
+ if n.Left().BoolVal() {
+ s.stmtList(n.Body())
} else {
- s.stmtList(n.Rlist)
+ s.stmtList(n.Rlist())
}
break
}
likely = 1
}
var bThen *ssa.Block
- if n.Nbody.Len() != 0 {
+ if n.Body().Len() != 0 {
bThen = s.f.NewBlock(ssa.BlockPlain)
} else {
bThen = bEnd
}
var bElse *ssa.Block
- if n.Rlist.Len() != 0 {
+ if n.Rlist().Len() != 0 {
bElse = s.f.NewBlock(ssa.BlockPlain)
} else {
bElse = bEnd
}
- s.condBranch(n.Left, bThen, bElse, likely)
+ s.condBranch(n.Left(), bThen, bElse, likely)
- if n.Nbody.Len() != 0 {
+ if n.Body().Len() != 0 {
s.startBlock(bThen)
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
}
- if n.Rlist.Len() != 0 {
+ if n.Rlist().Len() != 0 {
s.startBlock(bElse)
- s.stmtList(n.Rlist)
+ s.stmtList(n.Rlist())
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bEnd)
}
s.startBlock(bEnd)
case ir.ORETURN:
- s.stmtList(n.List)
+ s.stmtList(n.List())
b := s.exit()
b.Pos = s.lastPos.WithIsStmt()
case ir.ORETJMP:
- s.stmtList(n.List)
+ s.stmtList(n.List())
b := s.exit()
b.Kind = ssa.BlockRetJmp // override BlockRet
- b.Aux = n.Sym.Linksym()
+ b.Aux = n.Sym().Linksym()
case ir.OCONTINUE, ir.OBREAK:
var to *ssa.Block
- if n.Sym == nil {
+ if n.Sym() == nil {
// plain break/continue
- switch n.Op {
+ switch n.Op() {
case ir.OCONTINUE:
to = s.continueTo
case ir.OBREAK:
}
} else {
// labeled break/continue; look up the target
- sym := n.Sym
+ sym := n.Sym()
lab := s.label(sym)
- switch n.Op {
+ switch n.Op() {
case ir.OCONTINUE:
to = lab.continueTarget
case ir.OBREAK:
bEnd := s.f.NewBlock(ssa.BlockPlain)
// ensure empty for loops have correct position; issue #30167
- bBody.Pos = n.Pos
+ bBody.Pos = n.Pos()
// first, jump to condition test (OFOR) or body (OFORUNTIL)
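// Block layout (sketch): entry -> bCond -> bBody -> bIncr -> bCond ... -> bEnd;
// OFORUNTIL instead enters at bBody and tests the condition after the increment.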
b := s.endBlock()
- if n.Op == ir.OFOR {
+ if n.Op() == ir.OFOR {
b.AddEdgeTo(bCond)
// generate code to test condition
s.startBlock(bCond)
- if n.Left != nil {
- s.condBranch(n.Left, bBody, bEnd, 1)
+ if n.Left() != nil {
+ s.condBranch(n.Left(), bBody, bEnd, 1)
} else {
b := s.endBlock()
b.Kind = ssa.BlockPlain
// generate body
s.startBlock(bBody)
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
// tear down continue/break
s.continueTo = prevContinue
// generate incr (and, for OFORUNTIL, condition)
s.startBlock(bIncr)
- if n.Right != nil {
- s.stmt(n.Right)
+ if n.Right() != nil {
+ s.stmt(n.Right())
}
- if n.Op == ir.OFOR {
+ if n.Op() == ir.OFOR {
if b := s.endBlock(); b != nil {
b.AddEdgeTo(bCond)
// It can happen that bIncr ends in a block containing only VARKILL,
// and that muddles the debugging experience.
- if n.Op != ir.OFORUNTIL && b.Pos == src.NoXPos {
+ if n.Op() != ir.OFORUNTIL && b.Pos == src.NoXPos {
b.Pos = bCond.Pos
}
}
// bCond is unused in OFORUNTIL, so repurpose it.
bLateIncr := bCond
// test condition
- s.condBranch(n.Left, bLateIncr, bEnd, 1)
+ s.condBranch(n.Left(), bLateIncr, bEnd, 1)
// generate late increment
s.startBlock(bLateIncr)
- s.stmtList(n.List)
+ s.stmtList(n.List())
s.endBlock().AddEdgeTo(bBody)
}
}
// generate body code
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
s.breakTo = prevBreak
if lab != nil {
s.startBlock(bEnd)
case ir.OVARDEF:
- if !s.canSSA(n.Left) {
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false)
+ if !s.canSSA(n.Left()) {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left(), s.mem(), false)
}
case ir.OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
// varkill in the store chain is enough to keep it correctly ordered
// with respect to call ops.
- if !s.canSSA(n.Left) {
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false)
+ if !s.canSSA(n.Left()) {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left(), s.mem(), false)
}
case ir.OVARLIVE:
// Insert a varlive op to record that a variable is still live.
- if !n.Left.Name.Addrtaken() {
- s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
+ if !n.Left().Name().Addrtaken() {
+ s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left())
}
- switch n.Left.Class() {
+ switch n.Left().Class() {
case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
default:
- s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
+ s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left())
}
- s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())
+ s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left(), s.mem())
case ir.OCHECKNIL:
- p := s.expr(n.Left)
+ p := s.expr(n.Left())
s.nilCheck(p)
case ir.OINLMARK:
- s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Xoffset, s.mem())
+ s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Offset(), s.mem())
default:
- s.Fatalf("unhandled stmt %v", n.Op)
+ s.Fatalf("unhandled stmt %v", n.Op())
}
}
// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
// variables back to the stack.
- s.stmtList(s.curfn.Func.Exit)
+ s.stmtList(s.curfn.Func().Exit)
// Store SSAable PPARAMOUT variables back to stack locations.
for _, n := range s.returns {
addr := s.decladdrs[n]
- val := s.variable(n, n.Type)
+ val := s.variable(n, n.Type())
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
- s.store(n.Type, addr, val)
+ s.store(n.Type(), addr, val)
// TODO: if val is ever spilled, we'd like to use the
// PPARAMOUT slot for spilling it. That won't happen
// currently.
if hasUniquePos(n) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
- s.pushLine(n.Pos)
+ s.pushLine(n.Pos())
defer s.popLine()
}
- s.stmtList(n.Ninit)
- switch n.Op {
+ s.stmtList(n.Init())
+ switch n.Op() {
case ir.OBYTES2STRTMP:
- slice := s.expr(n.Left)
+ slice := s.expr(n.Left())
ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
- return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
+ return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
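// Note: no copy is made; the string header reuses the slice's (ptr, len)
// pair, which is safe only for these temporary conversions (the "TMP" ops).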
case ir.OSTR2BYTESTMP:
- str := s.expr(n.Left)
+ str := s.expr(n.Left())
ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
- return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
case ir.OCFUNC:
- aux := n.Left.Sym.Linksym()
- return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
+ aux := n.Left().Sym().Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
case ir.OMETHEXPR:
- sym := funcsym(n.Sym).Linksym()
- return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
+ sym := funcsym(n.Sym()).Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
case ir.ONAME:
if n.Class() == ir.PFUNC {
// "value" of a function is the address of the function's closure
- sym := funcsym(n.Sym).Linksym()
- return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
+ sym := funcsym(n.Sym()).Linksym()
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
}
if s.canSSA(n) {
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
addr := s.addr(n)
- return s.load(n.Type, addr)
+ return s.load(n.Type(), addr)
case ir.OCLOSUREVAR:
addr := s.addr(n)
- return s.load(n.Type, addr)
+ return s.load(n.Type(), addr)
case ir.ONIL:
- t := n.Type
+ t := n.Type()
switch {
case t.IsSlice():
return s.constSlice(t)
case ir.OLITERAL:
switch u := n.Val(); u.Kind() {
case constant.Int:
- i := ir.Int64Val(n.Type, u)
- switch n.Type.Size() {
+ i := ir.Int64Val(n.Type(), u)
+ switch n.Type().Size() {
case 1:
- return s.constInt8(n.Type, int8(i))
+ return s.constInt8(n.Type(), int8(i))
case 2:
- return s.constInt16(n.Type, int16(i))
+ return s.constInt16(n.Type(), int16(i))
case 4:
- return s.constInt32(n.Type, int32(i))
+ return s.constInt32(n.Type(), int32(i))
case 8:
- return s.constInt64(n.Type, i)
+ return s.constInt64(n.Type(), i)
default:
- s.Fatalf("bad integer size %d", n.Type.Size())
+ s.Fatalf("bad integer size %d", n.Type().Size())
return nil
}
case constant.String:
i := constant.StringVal(u)
if i == "" {
- return s.constEmptyString(n.Type)
+ return s.constEmptyString(n.Type())
}
- return s.entryNewValue0A(ssa.OpConstString, n.Type, i)
+ return s.entryNewValue0A(ssa.OpConstString, n.Type(), i)
case constant.Bool:
return s.constBool(constant.BoolVal(u))
case constant.Float:
f, _ := constant.Float64Val(u)
- switch n.Type.Size() {
+ switch n.Type().Size() {
case 4:
- return s.constFloat32(n.Type, f)
+ return s.constFloat32(n.Type(), f)
case 8:
- return s.constFloat64(n.Type, f)
+ return s.constFloat64(n.Type(), f)
default:
- s.Fatalf("bad float size %d", n.Type.Size())
+ s.Fatalf("bad float size %d", n.Type().Size())
return nil
}
case constant.Complex:
re, _ := constant.Float64Val(constant.Real(u))
im, _ := constant.Float64Val(constant.Imag(u))
- switch n.Type.Size() {
+ switch n.Type().Size() {
case 8:
pt := types.Types[types.TFLOAT32]
- return s.newValue2(ssa.OpComplexMake, n.Type,
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
s.constFloat32(pt, re),
s.constFloat32(pt, im))
case 16:
pt := types.Types[types.TFLOAT64]
- return s.newValue2(ssa.OpComplexMake, n.Type,
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
s.constFloat64(pt, re),
s.constFloat64(pt, im))
default:
- s.Fatalf("bad complex size %d", n.Type.Size())
+ s.Fatalf("bad complex size %d", n.Type().Size())
return nil
}
default:
return nil
}
case ir.OCONVNOP:
- to := n.Type
- from := n.Left.Type
+ to := n.Type()
+ from := n.Left().Type()
// Assume everything will work out, so set up our return value.
// Anything interesting that happens from here is a fatal.
- x := s.expr(n.Left)
+ x := s.expr(n.Left())
// Special case for not confusing GC and liveness.
// We don't want pointers accidentally classified
return v
case ir.OCONV:
- x := s.expr(n.Left)
- ft := n.Left.Type // from type
- tt := n.Type // to type
+ x := s.expr(n.Left())
+ ft := n.Left().Type() // from type
+ tt := n.Type() // to type
if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
- return s.newValue1(ssa.OpCopy, n.Type, x)
+ return s.newValue1(ssa.OpCopy, n.Type(), x)
}
if ft.IsInteger() && tt.IsInteger() {
var op ssa.Op
s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
}
}
- return s.newValue1(op, n.Type, x)
+ return s.newValue1(op, n.Type(), x)
}
if ft.IsFloat() || tt.IsFloat() {
if op2 == ssa.OpCopy {
return x
}
- return s.newValueOrSfCall1(op2, n.Type, x)
+ return s.newValueOrSfCall1(op2, n.Type(), x)
}
if op2 == ssa.OpCopy {
- return s.newValueOrSfCall1(op1, n.Type, x)
+ return s.newValueOrSfCall1(op1, n.Type(), x)
}
- return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x))
+ return s.newValueOrSfCall1(op2, n.Type(), s.newValueOrSfCall1(op1, types.Types[it], x))
}
// Tricky 64-bit unsigned cases.
if ft.IsInteger() {
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
- s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
+ s.Fatalf("unhandled OCONV %s -> %s", n.Left().Type().Etype, n.Type().Etype)
return nil
case ir.ODOTTYPE:
// binary ops
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Left.Type.IsComplex() {
- pt := floatForComplex(n.Left.Type)
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Left().Type().IsComplex() {
+ pt := floatForComplex(n.Left().Type())
op := s.ssaOp(ir.OEQ, pt)
r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
- switch n.Op {
+ switch n.Op() {
case ir.OEQ:
return c
case ir.ONE:
return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
default:
- s.Fatalf("ordered complex compare %v", n.Op)
+ s.Fatalf("ordered complex compare %v", n.Op())
}
}
// Convert OGE and OGT into OLE and OLT.
- op := n.Op
+ op := n.Op()
switch op {
case ir.OGE:
op, a, b = ir.OLE, b, a
case ir.OGT:
op, a, b = ir.OLT, b, a
}
- if n.Left.Type.IsFloat() {
+ if n.Left().Type().IsFloat() {
// float comparison
- return s.newValueOrSfCall2(s.ssaOp(op, n.Left.Type), types.Types[types.TBOOL], a, b)
+ return s.newValueOrSfCall2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b)
}
// integer comparison
- return s.newValue2(s.ssaOp(op, n.Left.Type), types.Types[types.TBOOL], a, b)
+ return s.newValue2(s.ssaOp(op, n.Left().Type()), types.Types[types.TBOOL], a, b)
case ir.OMUL:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Type.IsComplex() {
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Type().IsComplex() {
mulop := ssa.OpMul64F
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
- pt := floatForComplex(n.Type) // Could be Float32 or Float64
+ pt := floatForComplex(n.Type()) // Could be Float32 or Float64
wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
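// Recall (a+bi)*(c+di) = (ac - bd) + (ad + bc)i; the products below are
// formed in float64 (wt) and only the final parts are narrowed back to pt.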
areal := s.newValue1(ssa.OpComplexReal, pt, a)
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
- return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
+ return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
- if n.Type.IsFloat() {
- return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.ODIV:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Type.IsComplex() {
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Type().IsComplex() {
// TODO this is not executed because the front-end substitutes a runtime call.
// That probably ought to change; with modest optimization the widen/narrow
// conversions could all be elided in larger expression trees.
addop := ssa.OpAdd64F
subop := ssa.OpSub64F
divop := ssa.OpDiv64F
- pt := floatForComplex(n.Type) // Could be Float32 or Float64
+ pt := floatForComplex(n.Type()) // Could be Float32 or Float64
wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
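// Recall (a+bi)/(c+di) = ((ac + bd) + (bc - ad)i) / (c*c + d*d); again
// the intermediates are widened to float64 to limit cancellation error.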
areal := s.newValue1(ssa.OpComplexReal, pt, a)
xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
}
- return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
+ return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
}
- if n.Type.IsFloat() {
- return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
return s.intDivide(n, a, b)
case ir.OMOD:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
return s.intDivide(n, a, b)
case ir.OADD, ir.OSUB:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- if n.Type.IsComplex() {
- pt := floatForComplex(n.Type)
- op := s.ssaOp(n.Op, pt)
- return s.newValue2(ssa.OpComplexMake, n.Type,
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ if n.Type().IsComplex() {
+ pt := floatForComplex(n.Type())
+ op := s.ssaOp(n.Op(), pt)
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
}
- if n.Type.IsFloat() {
- return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.OAND, ir.OOR, ir.OXOR:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
case ir.OANDNOT:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
- return s.newValue2(s.ssaOp(ir.OAND, n.Type), a.Type, a, b)
+ return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
case ir.OLSH, ir.ORSH:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
+ a := s.expr(n.Left())
+ b := s.expr(n.Right())
bt := b.Type
if bt.IsSigned() {
cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
s.check(cmp, panicshift)
bt = bt.ToUnsigned()
}
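// A negative shift count must panic (per the Go spec); once 0 <= b has
// been checked, the count can safely be reinterpreted as unsigned.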
- return s.newValue2(s.ssaShiftOp(n.Op, n.Type, bt), a.Type, a, b)
+ return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
case ir.OANDAND, ir.OOROR:
// To implement OANDAND (and OOROR), we introduce a
// new temporary variable to hold the result. The
// }
// Using var in the subsequent block introduces the
// necessary phi variable.
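// Sketch of the construction for n.Left() && n.Right():
//     vars[n] = eval(left)
//     if vars[n] { vars[n] = eval(right) }   // && falls through to bRight
//     result = s.variable(n, bool)           // phi over the two definitions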
- el := s.expr(n.Left)
+ el := s.expr(n.Left())
s.vars[n] = el
b := s.endBlock()
bRight := s.f.NewBlock(ssa.BlockPlain)
bResult := s.f.NewBlock(ssa.BlockPlain)
- if n.Op == ir.OANDAND {
+ if n.Op() == ir.OANDAND {
b.AddEdgeTo(bRight)
b.AddEdgeTo(bResult)
- } else if n.Op == ir.OOROR {
+ } else if n.Op() == ir.OOROR {
b.AddEdgeTo(bResult)
b.AddEdgeTo(bRight)
}
s.startBlock(bRight)
- er := s.expr(n.Right)
+ er := s.expr(n.Right())
s.vars[n] = er
b = s.endBlock()
s.startBlock(bResult)
return s.variable(n, types.Types[types.TBOOL])
case ir.OCOMPLEX:
- r := s.expr(n.Left)
- i := s.expr(n.Right)
- return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
+ r := s.expr(n.Left())
+ i := s.expr(n.Right())
+ return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
// unary ops
case ir.ONEG:
- a := s.expr(n.Left)
- if n.Type.IsComplex() {
- tp := floatForComplex(n.Type)
- negop := s.ssaOp(n.Op, tp)
- return s.newValue2(ssa.OpComplexMake, n.Type,
+ a := s.expr(n.Left())
+ if n.Type().IsComplex() {
+ tp := floatForComplex(n.Type())
+ negop := s.ssaOp(n.Op(), tp)
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
}
- return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
+ return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
case ir.ONOT, ir.OBITNOT:
- a := s.expr(n.Left)
- return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
+ a := s.expr(n.Left())
+ return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
case ir.OIMAG, ir.OREAL:
- a := s.expr(n.Left)
- return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
+ a := s.expr(n.Left())
+ return s.newValue1(s.ssaOp(n.Op(), n.Left().Type()), n.Type(), a)
case ir.OPLUS:
- return s.expr(n.Left)
+ return s.expr(n.Left())
case ir.OADDR:
- return s.addr(n.Left)
+ return s.addr(n.Left())
case ir.ORESULT:
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
// Do the old thing
- addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
- return s.rawLoad(n.Type, addr)
+ addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset())
+ return s.rawLoad(n.Type(), addr)
}
- which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
+ which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset())
if which == -1 {
// Do the old thing // TODO: Panic instead.
- addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
- return s.rawLoad(n.Type, addr)
+ addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset())
+ return s.rawLoad(n.Type(), addr)
}
- if canSSAType(n.Type) {
- return s.newValue1I(ssa.OpSelectN, n.Type, which, s.prevCall)
+ if canSSAType(n.Type()) {
+ return s.newValue1I(ssa.OpSelectN, n.Type(), which, s.prevCall)
} else {
- addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type), which, s.prevCall)
- return s.rawLoad(n.Type, addr)
+ addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type()), which, s.prevCall)
+ return s.rawLoad(n.Type(), addr)
}
case ir.ODEREF:
- p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
- return s.load(n.Type, p)
+ p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ return s.load(n.Type(), p)
case ir.ODOT:
- if n.Left.Op == ir.OSTRUCTLIT {
+ if n.Left().Op() == ir.OSTRUCTLIT {
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
- if !isZero(n.Left) {
- s.Fatalf("literal with nonzero value in SSA: %v", n.Left)
+ if !isZero(n.Left()) {
+ s.Fatalf("literal with nonzero value in SSA: %v", n.Left())
}
- return s.zeroVal(n.Type)
+ return s.zeroVal(n.Type())
}
// If n is addressable and can't be represented in
// SSA, then load just the selected field. This
// instrumentation.
if islvalue(n) && !s.canSSA(n) {
p := s.addr(n)
- return s.load(n.Type, p)
+ return s.load(n.Type(), p)
}
- v := s.expr(n.Left)
- return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
+ v := s.expr(n.Left())
+ return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
case ir.ODOTPTR:
- p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
- p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
- return s.load(n.Type, p)
+ p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
+ return s.load(n.Type(), p)
case ir.OINDEX:
switch {
- case n.Left.Type.IsString():
- if n.Bounded() && ir.IsConst(n.Left, constant.String) && ir.IsConst(n.Right, constant.Int) {
+ case n.Left().Type().IsString():
+ if n.Bounded() && ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.Int) {
// Replace "abc"[1] with 'b'.
// Delayed until now because "abc"[1] is not an ideal constant.
// See test/fixedbugs/issue11370.go.
- return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(n.Left.StringVal()[n.Right.Int64Val()])))
+ return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(n.Left().StringVal()[n.Right().Int64Val()])))
}
- a := s.expr(n.Left)
- i := s.expr(n.Right)
+ a := s.expr(n.Left())
+ i := s.expr(n.Right())
len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
ptrtyp := s.f.Config.Types.BytePtr
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
- if ir.IsConst(n.Right, constant.Int) {
- ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64Val(), ptr)
+ if ir.IsConst(n.Right(), constant.Int) {
+ ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right().Int64Val(), ptr)
} else {
ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
}
return s.load(types.Types[types.TUINT8], ptr)
- case n.Left.Type.IsSlice():
+ case n.Left().Type().IsSlice():
p := s.addr(n)
- return s.load(n.Left.Type.Elem(), p)
- case n.Left.Type.IsArray():
- if canSSAType(n.Left.Type) {
+ return s.load(n.Left().Type().Elem(), p)
+ case n.Left().Type().IsArray():
+ if canSSAType(n.Left().Type()) {
// SSA can handle arrays of length at most 1.
- bound := n.Left.Type.NumElem()
- a := s.expr(n.Left)
- i := s.expr(n.Right)
+ bound := n.Left().Type().NumElem()
+ a := s.expr(n.Left())
+ i := s.expr(n.Right())
if bound == 0 {
// Bounds check will never succeed. Might as well
// use constants for the bounds check.
z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
// The return value won't be live; return junk.
- return s.newValue0(ssa.OpUnknown, n.Type)
+ return s.newValue0(ssa.OpUnknown, n.Type())
}
len := s.constInt(types.Types[types.TINT], bound)
s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
- return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
+ return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
}
p := s.addr(n)
- return s.load(n.Left.Type.Elem(), p)
+ return s.load(n.Left().Type().Elem(), p)
default:
- s.Fatalf("bad type for index %v", n.Left.Type)
+ s.Fatalf("bad type for index %v", n.Left().Type())
return nil
}
case ir.OLEN, ir.OCAP:
switch {
- case n.Left.Type.IsSlice():
+ case n.Left().Type().IsSlice():
op := ssa.OpSliceLen
- if n.Op == ir.OCAP {
+ if n.Op() == ir.OCAP {
op = ssa.OpSliceCap
}
- return s.newValue1(op, types.Types[types.TINT], s.expr(n.Left))
- case n.Left.Type.IsString(): // string; not reachable for OCAP
- return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.Left))
- case n.Left.Type.IsMap(), n.Left.Type.IsChan():
- return s.referenceTypeBuiltin(n, s.expr(n.Left))
+ return s.newValue1(op, types.Types[types.TINT], s.expr(n.Left()))
+ case n.Left().Type().IsString(): // string; not reachable for OCAP
+ return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.Left()))
+ case n.Left().Type().IsMap(), n.Left().Type().IsChan():
+ return s.referenceTypeBuiltin(n, s.expr(n.Left()))
default: // array
- return s.constInt(types.Types[types.TINT], n.Left.Type.NumElem())
+ return s.constInt(types.Types[types.TINT], n.Left().Type().NumElem())
}
case ir.OSPTR:
- a := s.expr(n.Left)
- if n.Left.Type.IsSlice() {
- return s.newValue1(ssa.OpSlicePtr, n.Type, a)
+ a := s.expr(n.Left())
+ if n.Left().Type().IsSlice() {
+ return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
} else {
- return s.newValue1(ssa.OpStringPtr, n.Type, a)
+ return s.newValue1(ssa.OpStringPtr, n.Type(), a)
}
case ir.OITAB:
- a := s.expr(n.Left)
- return s.newValue1(ssa.OpITab, n.Type, a)
+ a := s.expr(n.Left())
+ return s.newValue1(ssa.OpITab, n.Type(), a)
case ir.OIDATA:
- a := s.expr(n.Left)
- return s.newValue1(ssa.OpIData, n.Type, a)
+ a := s.expr(n.Left())
+ return s.newValue1(ssa.OpIData, n.Type(), a)
case ir.OEFACE:
- tab := s.expr(n.Left)
- data := s.expr(n.Right)
- return s.newValue2(ssa.OpIMake, n.Type, tab, data)
+ tab := s.expr(n.Left())
+ data := s.expr(n.Right())
+ return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
case ir.OSLICEHEADER:
- p := s.expr(n.Left)
- l := s.expr(n.List.First())
- c := s.expr(n.List.Second())
- return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
+ p := s.expr(n.Left())
+ l := s.expr(n.List().First())
+ c := s.expr(n.List().Second())
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
- v := s.expr(n.Left)
+ v := s.expr(n.Left())
var i, j, k *ssa.Value
low, high, max := n.SliceBounds()
if low != nil {
i = s.expr(low)
}
if high != nil {
j = s.expr(high)
}
if max != nil {
k = s.expr(max)
}
p, l, c := s.slice(v, i, j, k, n.Bounded())
- return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
case ir.OSLICESTR:
- v := s.expr(n.Left)
+ v := s.expr(n.Left())
var i, j *ssa.Value
low, high, _ := n.SliceBounds()
if low != nil {
i = s.expr(low)
}
if high != nil {
j = s.expr(high)
}
p, l, _ := s.slice(v, i, j, nil, n.Bounded())
- return s.newValue2(ssa.OpStringMake, n.Type, p, l)
+ return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
case ir.OCALLFUNC:
if isIntrinsicCall(n) {
return s.intrinsicCall(n)
}
fallthrough
case ir.OCALLINTER, ir.OCALLMETH:
return s.callResult(n, callNormal)
case ir.OGETG:
- return s.newValue1(ssa.OpGetG, n.Type, s.mem())
+ return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
case ir.OAPPEND:
return s.append(n, false)
case ir.OSTRUCTLIT, ir.OARRAYLIT:
// All literals with nonzero fields have already been
// rewritten during walk. Any that remain are just T{}
// or equivalents. Use the zero value.
if !isZero(n) {
s.Fatalf("literal with nonzero value in SSA: %v", n)
}
- return s.zeroVal(n.Type)
+ return s.zeroVal(n.Type())
case ir.ONEWOBJ:
- if n.Type.Elem().Size() == 0 {
- return s.newValue1A(ssa.OpAddr, n.Type, zerobaseSym, s.sb)
+ if n.Type().Elem().Size() == 0 {
+ return s.newValue1A(ssa.OpAddr, n.Type(), zerobaseSym, s.sb)
}
- typ := s.expr(n.Left)
- vv := s.rtcall(newobject, true, []*types.Type{n.Type}, typ)
+ typ := s.expr(n.Left())
+ vv := s.rtcall(newobject, true, []*types.Type{n.Type()}, typ)
return vv[0]
default:
- s.Fatalf("unhandled expr %v", n.Op)
+ s.Fatalf("unhandled expr %v", n.Op())
return nil
}
}
// The value form of append(s, e1, e2, e3) grows s if needed, then
// stores each new element past the old length:
// *(ptr+len) = e1
// *(ptr+len+1) = e2
// *(ptr+len+2) = e3
- et := n.Type.Elem()
+ et := n.Type().Elem()
pt := types.NewPtr(et)
// Evaluate slice
- sn := n.List.First() // the slice node is the first in the list
+ sn := n.List().First() // the slice node is the first in the list
var slice, addr *ssa.Value
if inplace {
addr = s.addr(sn)
- slice = s.load(n.Type, addr)
+ slice = s.load(n.Type(), addr)
} else {
slice = s.expr(sn)
}
assign := s.f.NewBlock(ssa.BlockPlain)
// Decide if we need to grow
- nargs := int64(n.List.Len() - 1)
+ nargs := int64(n.List().Len() - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
// Call growslice
s.startBlock(grow)
- taddr := s.expr(n.Left)
+ taddr := s.expr(n.Left())
r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)
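// growslice returns the grown slice; r[0], r[1], r[2] are its new
// base pointer, length, and capacity.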
if inplace {
- if sn.Op == ir.ONAME && sn.Class() != ir.PEXTERN {
+ if sn.Op() == ir.ONAME && sn.Class() != ir.PEXTERN {
// Tell liveness we're about to build a new slice
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
}
store bool
}
args := make([]argRec, 0, nargs)
- for _, n := range n.List.Slice()[1:] {
- if canSSAType(n.Type) {
+ for _, n := range n.List().Slice()[1:] {
+ if canSSAType(n.Type()) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v := s.addr(n)
delete(s.vars, newlenVar)
delete(s.vars, capVar)
// make result
- return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, nl, c)
}
// condBranch evaluates the boolean expression cond and branches to yes
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
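// likely is a branch-prediction hint: positive means cond is expected
// to be true, negative means expected false, zero means no prediction.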
func (s *state) condBranch(cond *ir.Node, yes, no *ssa.Block, likely int8) {
- switch cond.Op {
+ switch cond.Op() {
case ir.OANDAND:
mid := s.f.NewBlock(ssa.BlockPlain)
- s.stmtList(cond.Ninit)
- s.condBranch(cond.Left, mid, no, max8(likely, 0))
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), mid, no, max8(likely, 0))
s.startBlock(mid)
- s.condBranch(cond.Right, yes, no, likely)
+ s.condBranch(cond.Right(), yes, no, likely)
return
// Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide
// whether the first branch is likely or not. So we pass 0 for
// the likeliness of the first branch.
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
case ir.OOROR:
mid := s.f.NewBlock(ssa.BlockPlain)
- s.stmtList(cond.Ninit)
- s.condBranch(cond.Left, yes, mid, min8(likely, 0))
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), yes, mid, min8(likely, 0))
s.startBlock(mid)
- s.condBranch(cond.Right, yes, no, likely)
+ s.condBranch(cond.Right(), yes, no, likely)
return
// Note: if likely==-1, then both recursive calls pass -1.
// If likely==1, then we don't have enough info to decide
// the likelihood of the first branch.
case ir.ONOT:
- s.stmtList(cond.Ninit)
- s.condBranch(cond.Left, no, yes, -likely)
+ s.stmtList(cond.Init())
+ s.condBranch(cond.Left(), no, yes, -likely)
return
}
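// General case: evaluate cond as a value and branch on the result.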
c := s.expr(cond)
// assign does left = right.
// Right has already been evaluated to ssa, left has not.
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
func (s *state) assign(left *ir.Node, right *ssa.Value, deref bool, skip skipMask) {
- if left.Op == ir.ONAME && ir.IsBlank(left) {
+ if left.Op() == ir.ONAME && ir.IsBlank(left) {
return
}
- t := left.Type
+ t := left.Type()
dowidth(t)
if s.canSSA(left) {
if deref {
s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
}
- if left.Op == ir.ODOT {
+ if left.Op() == ir.ODOT {
// We're assigning to a field of an ssa-able value.
// We need to build a new structure with the new value for the
// field we're assigning and the old values for the other fields.
// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
// Grab information about the structure type.
- t := left.Left.Type
+ t := left.Left().Type()
nf := t.NumFields()
idx := fieldIdx(left)
// Grab old value of structure.
- old := s.expr(left.Left)
+ old := s.expr(left.Left())
// Make new structure.
new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
}
// Recursively assign the new value we've made to the base of the dot op.
- s.assign(left.Left, new, false, 0)
+ s.assign(left.Left(), new, false, 0)
// TODO: do we need to update named values here?
return
}
- if left.Op == ir.OINDEX && left.Left.Type.IsArray() {
- s.pushLine(left.Pos)
+ if left.Op() == ir.OINDEX && left.Left().Type().IsArray() {
+ s.pushLine(left.Pos())
defer s.popLine()
// We're assigning to an element of an ssa-able array.
// a[i] = v
- t := left.Left.Type
+ t := left.Left().Type()
n := t.NumElem()
- i := s.expr(left.Right) // index
+ i := s.expr(left.Right()) // index
if n == 0 {
// The bounds check must fail. Might as well
// ignore the actual index and just use zeros.
z := s.constInt(types.Types[types.TINT], 0)
s.boundsCheck(z, z, ssa.BoundsIndex, false)
return
}
if n != 1 {
s.Fatalf("assigning to non-1-length array")
}
// Rewrite to a = [1]{v}
len := s.constInt(types.Types[types.TINT], 1)
s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
v := s.newValue1(ssa.OpArrayMake1, t, right)
- s.assign(left.Left, v, false, 0)
+ s.assign(left.Left(), v, false, 0)
return
}
// Update variable assignment.
// If this assignment clobbers an entire local variable, then emit
// OpVarDef so liveness analysis knows the variable is redefined.
- if base := clobberBase(left); base.Op == ir.ONAME && base.Class() != ir.PEXTERN && skip == 0 {
+ if base := clobberBase(left); base.Op() == ir.ONAME && base.Class() != ir.PEXTERN && skip == 0 {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
}
// Compiler frontend optimizations emit OBYTES2STRTMP nodes
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
- return s.newValue2(ssa.OpStringMake, n.Type, args[0], args[1])
+ return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
},
all...)
}
}
func isIntrinsicCall(n *ir.Node) bool {
- if n == nil || n.Left == nil {
+ if n == nil || n.Left() == nil {
return false
}
- return findIntrinsic(n.Left.Sym) != nil
+ return findIntrinsic(n.Left().Sym()) != nil
}
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
func (s *state) intrinsicCall(n *ir.Node) *ssa.Value {
- v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
+ v := findIntrinsic(n.Left().Sym())(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
if x == nil {
x = s.mem()
}
if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
x = x.Args[0]
}
- base.WarnfAt(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
+ base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.Left().Sym().Name, x.LongString())
}
return v
}
func (s *state) intrinsicArgs(n *ir.Node) []*ssa.Value {
// Construct map of temps; see comments in s.call about the structure of n.
temps := map[*ir.Node]*ssa.Value{}
- for _, a := range n.List.Slice() {
- if a.Op != ir.OAS {
- s.Fatalf("non-assignment as a temp function argument %v", a.Op)
+ for _, a := range n.List().Slice() {
+ if a.Op() != ir.OAS {
+ s.Fatalf("non-assignment as a temp function argument %v", a.Op())
}
- l, r := a.Left, a.Right
- if l.Op != ir.ONAME {
- s.Fatalf("non-ONAME temp function argument %v", a.Op)
+ l, r := a.Left(), a.Right()
+ if l.Op() != ir.ONAME {
+ s.Fatalf("non-ONAME temp function argument %v", a.Op())
}
// Evaluate and store to "temporary".
// Walk ensures these temporaries are dead outside of n.
temps[l] = s.expr(r)
}
- args := make([]*ssa.Value, n.Rlist.Len())
- for i, n := range n.Rlist.Slice() {
+ args := make([]*ssa.Value, n.Rlist().Len())
+ for i, n := range n.Rlist().Slice() {
// Store a value to an argument slot.
if x, ok := temps[n]; ok {
// This is a previously computed temporary.
// once.mutex'. Such a statement will create a mapping in s.vars[] from
// the autotmp name to the evaluated SSA arg value, but won't do any
// stores to the stack.
- s.stmtList(n.List)
+ s.stmtList(n.List())
var args []*ssa.Value
var argNodes []*ir.Node
opendefer := &openDeferInfo{
n: n,
}
- fn := n.Left
- if n.Op == ir.OCALLFUNC {
+ fn := n.Left()
+ if n.Op() == ir.OCALLFUNC {
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the function directly if it is a static function.
closureVal := s.expr(fn)
- closure := s.openDeferSave(nil, fn.Type, closureVal)
+ closure := s.openDeferSave(nil, fn.Type(), closureVal)
opendefer.closureNode = closure.Aux.(*ir.Node)
- if !(fn.Op == ir.ONAME && fn.Class() == ir.PFUNC) {
+ if !(fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC) {
opendefer.closure = closure
}
- } else if n.Op == ir.OCALLMETH {
- if fn.Op != ir.ODOTMETH {
+ } else if n.Op() == ir.OCALLMETH {
+ if fn.Op() != ir.ODOTMETH {
base.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
closureVal := s.getMethodClosure(fn)
// We must always store the function value in a stack slot for the
// runtime panic code to use. But in the defer exit code, we will
// call the method directly.
- closure := s.openDeferSave(nil, fn.Type, closureVal)
+ closure := s.openDeferSave(nil, fn.Type(), closureVal)
opendefer.closureNode = closure.Aux.(*ir.Node)
} else {
- if fn.Op != ir.ODOTINTER {
- base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
+ if fn.Op() != ir.ODOTINTER {
+ base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
closure, rcvr := s.getClosureAndRcvr(fn)
opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
// Important to get the receiver type correct, so it is recognized
// as a pointer for GC purposes.
- opendefer.rcvr = s.openDeferSave(nil, fn.Type.Recv().Type, rcvr)
+ opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
opendefer.closureNode = opendefer.closure.Aux.(*ir.Node)
opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Node)
}
- for _, argn := range n.Rlist.Slice() {
+ for _, argn := range n.Rlist().Slice() {
var v *ssa.Value
- if canSSAType(argn.Type) {
- v = s.openDeferSave(nil, argn.Type, s.expr(argn))
+ if canSSAType(argn.Type()) {
+ v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
} else {
- v = s.openDeferSave(argn, argn.Type, nil)
+ v = s.openDeferSave(argn, argn.Type(), nil)
}
args = append(args, v)
argNodes = append(argNodes, v.Aux.(*ir.Node))
if canSSA {
pos = val.Pos
} else {
- pos = n.Pos
+ pos = n.Pos()
}
argTemp := tempAt(pos.WithNotStmt(), s.curfn, t)
- argTemp.Name.SetOpenDeferSlot(true)
+ argTemp.Name().SetOpenDeferSlot(true)
var addrArgTemp *ssa.Value
// Use OpVarLive to make sure stack slots for the args, etc. are not
// removed by dead-store elimination
// associated defer call has been activated).
s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
s.defvars[s.f.Entry.ID][memVar] = s.entryNewValue1A(ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
- addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
+ addrArgTemp = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
} else {
// Special case if we're still in the entry block. We can't use
// the above code, since s.defvars[s.f.Entry.ID] isn't defined
// until we end the entry block with s.endBlock().
s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
- addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false)
+ addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.mem(), false)
}
if t.HasPointers() {
// Since we may use this argTemp during exit depending on the
// Therefore, we must make sure it is zeroed out in the entry
// block if it contains pointers, else GC may wrongly follow an
// uninitialized pointer value.
- argTemp.Name.SetNeedzero(true)
+ argTemp.Name().SetNeedzero(true)
}
if !canSSA {
a := s.addr(n)
// closure/receiver/args that were stored in argtmps at the point
// of the defer statement.
argStart := base.Ctxt.FixedFrameSize()
- fn := r.n.Left
- stksize := fn.Type.ArgWidth()
+ fn := r.n.Left()
+ stksize := fn.Type().ArgWidth()
var ACArgs []ssa.Param
var ACResults []ssa.Param
var callArgs []*ssa.Value
call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, aux, codeptr, v, s.mem())
}
} else {
- aux := ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults)
+ aux := ssa.StaticAuxCall(fn.Sym().Linksym(), ACArgs, ACResults)
if testLateExpansion {
callArgs = append(callArgs, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
if r.rcvrNode != nil {
- if r.rcvrNode.Type.HasPointers() {
+ if r.rcvrNode.Type().HasPointers() {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
}
}
for _, argNode := range r.argNodes {
- if argNode.Type.HasPointers() {
+ if argNode.Type().HasPointers() {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
}
}
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
- fn := n.Left
+ fn := n.Left()
var ACArgs []ssa.Param
var ACResults []ssa.Param
var callArgs []*ssa.Value
- res := n.Left.Type.Results()
+ res := n.Left().Type().Results()
if k == callNormal {
nf := res.NumFields()
for i := 0; i < nf; i++ {
testLateExpansion := false
- switch n.Op {
+ switch n.Op() {
case ir.OCALLFUNC:
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
- if k == callNormal && fn.Op == ir.ONAME && fn.Class() == ir.PFUNC {
- sym = fn.Sym
+ if k == callNormal && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC {
+ sym = fn.Sym()
break
}
closure = s.expr(fn)
s.maybeNilCheckClosure(closure, k)
}
case ir.OCALLMETH:
- if fn.Op != ir.ODOTMETH {
+ if fn.Op() != ir.ODOTMETH {
s.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
}
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
if k == callNormal {
- sym = fn.Sym
+ sym = fn.Sym()
break
}
closure = s.getMethodClosure(fn)
// Note: receiver is already present in n.Rlist, so we don't
// want to set it here.
case ir.OCALLINTER:
- if fn.Op != ir.ODOTINTER {
- s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
+ if fn.Op() != ir.ODOTINTER {
+ s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
var iclosure *ssa.Value
closure = iclosure
}
}
- dowidth(fn.Type)
- stksize := fn.Type.ArgWidth() // includes receiver, args, and results
+ dowidth(fn.Type())
+ stksize := fn.Type().ArgWidth() // includes receiver, args, and results
// Run all assignments of temps.
// The temps are introduced to avoid overwriting argument
// slots when arguments themselves require function calls.
- s.stmtList(n.List)
+ s.stmtList(n.List())
var call *ssa.Value
if k == callDeferStack {
testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f)
// Make a defer struct d on the stack.
t := deferstruct(stksize)
- d := tempAt(n.Pos, s.curfn, t)
+ d := tempAt(n.Pos(), s.curfn, t)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d)
// 11: fd
// Then, store all the arguments of the defer call.
- ft := fn.Type
+ ft := fn.Type()
off := t.FieldOff(12)
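// Arguments to the deferred call are laid out starting at field 12
// of the defer struct, just after the fd field.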
- args := n.Rlist.Slice()
+ args := n.Rlist().Slice()
// Set receiver (for interface calls). Always a pointer.
if rcvr != nil {
s.store(types.Types[types.TUINTPTR], p, rcvr)
}
// Set receiver (for method calls).
- if n.Op == ir.OCALLMETH {
+ if n.Op() == ir.OCALLMETH {
f := ft.Recv()
s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
args = args[1:]
}
// Write args.
- t := n.Left.Type
- args := n.Rlist.Slice()
- if n.Op == ir.OCALLMETH {
+ t := n.Left().Type()
+ args := n.Rlist().Slice()
+ if n.Op() == ir.OCALLMETH {
f := t.Recv()
ACArg, arg := s.putArg(args[0], f.Type, argStart+f.Offset, testLateExpansion)
ACArgs = append(ACArgs, ACArg)
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem())
}
default:
- s.Fatalf("bad call type %v %v", n.Op, n)
+ s.Fatalf("bad call type %v %v", n.Op(), n)
}
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
}
s.vars[memVar] = call
}
// Insert OVARLIVE nodes
- s.stmtList(n.Nbody)
+ s.stmtList(n.Body())
// Finish block for defers
if k == callDefer || k == callDeferStack {
if testLateExpansion {
return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
}
- return s.load(n.Type, s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+base.Ctxt.FixedFrameSize()))
+ return s.load(n.Type(), s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+base.Ctxt.FixedFrameSize()))
}
// maybeNilCheckClosure checks if a nil check of a closure is needed in some
// architecture-dependent situations and, if so, emits the nil check.
// Make a PFUNC node out of that, then evaluate it.
// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
// We can then pass that to defer or go.
- n2 := ir.NewNameAt(fn.Pos, fn.Sym)
- n2.Name.Curfn = s.curfn
+ n2 := ir.NewNameAt(fn.Pos(), fn.Sym())
+ n2.Name().Curfn = s.curfn
n2.SetClass(ir.PFUNC)
// n2.Sym already existed, so it's already marked as a function.
- n2.Pos = fn.Pos
- n2.Type = types.Types[types.TUINT8] // fake type for a static closure. Could use runtime.funcval if we had it.
+ n2.SetPos(fn.Pos())
+ n2.SetType(types.Types[types.TUINT8]) // fake type for a static closure. Could use runtime.funcval if we had it.
return s.expr(n2)
}
// getClosureAndRcvr returns values for the appropriate closure and receiver of an
// interface call
func (s *state) getClosureAndRcvr(fn *ir.Node) (*ssa.Value, *ssa.Value) {
- i := s.expr(fn.Left)
+ i := s.expr(fn.Left())
itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
s.nilCheck(itab)
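// The itab pointer must be non-nil before we can load the method
// pointer out of it below.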
- itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
+ itabidx := fn.Offset() + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
return closure, rcvr
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
func (s *state) addr(n *ir.Node) *ssa.Value {
- if n.Op != ir.ONAME {
- s.pushLine(n.Pos)
+ if n.Op() != ir.ONAME {
+ s.pushLine(n.Pos())
defer s.popLine()
}
- t := types.NewPtr(n.Type)
- switch n.Op {
+ t := types.NewPtr(n.Type())
+ switch n.Op() {
case ir.ONAME:
switch n.Class() {
case ir.PEXTERN:
// global variable
- v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb)
+ v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym().Linksym(), s.sb)
// TODO: Make OpAddr use AuxInt as well as Aux.
- if n.Xoffset != 0 {
- v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
+ if n.Offset() != 0 {
+ v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Offset(), v)
}
return v
case ir.PPARAM:
case ir.ORESULT:
// load return from callee
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
- return s.constOffPtrSP(t, n.Xoffset)
+ return s.constOffPtrSP(t, n.Offset())
}
- which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
+ which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Offset())
if which == -1 {
// Do the old thing // TODO: Panic instead.
- return s.constOffPtrSP(t, n.Xoffset)
+ return s.constOffPtrSP(t, n.Offset())
}
x := s.newValue1I(ssa.OpSelectNAddr, t, which, s.prevCall)
return x
case ir.OINDEX:
- if n.Left.Type.IsSlice() {
- a := s.expr(n.Left)
- i := s.expr(n.Right)
+ if n.Left().Type().IsSlice() {
+ a := s.expr(n.Left())
+ i := s.expr(n.Right())
len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
p := s.newValue1(ssa.OpSlicePtr, t, a)
return s.newValue2(ssa.OpPtrIndex, t, p, i)
} else { // array
- a := s.addr(n.Left)
- i := s.expr(n.Right)
- len := s.constInt(types.Types[types.TINT], n.Left.Type.NumElem())
+ a := s.addr(n.Left())
+ i := s.expr(n.Right())
+ len := s.constInt(types.Types[types.TINT], n.Left().Type().NumElem())
i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
- return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
+ return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left().Type().Elem()), a, i)
}
case ir.ODEREF:
- return s.exprPtr(n.Left, n.Bounded(), n.Pos)
+ return s.exprPtr(n.Left(), n.Bounded(), n.Pos())
case ir.ODOT:
- p := s.addr(n.Left)
- return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
+ p := s.addr(n.Left())
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
case ir.ODOTPTR:
- p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
- return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
+ p := s.exprPtr(n.Left(), n.Bounded(), n.Pos())
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
case ir.OCLOSUREVAR:
- return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(),
s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
case ir.OCONVNOP:
- addr := s.addr(n.Left)
+ addr := s.addr(n.Left())
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
return s.callAddr(n, callNormal)
}
return v.Args[0]
default:
- s.Fatalf("unhandled addr %v", n.Op)
+ s.Fatalf("unhandled addr %v", n.Op())
return nil
}
}
if base.Flag.N != 0 {
return false
}
- for n.Op == ir.ODOT || (n.Op == ir.OINDEX && n.Left.Type.IsArray()) {
- n = n.Left
+ for n.Op() == ir.ODOT || (n.Op() == ir.OINDEX && n.Left().Type().IsArray()) {
+ n = n.Left()
}
- if n.Op != ir.ONAME {
+ if n.Op() != ir.ONAME {
return false
}
- if n.Name.Addrtaken() {
+ if n.Name().Addrtaken() {
return false
}
if isParamHeapCopy(n) {
return false
}
}
- if n.Class() == ir.PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
+ if n.Class() == ir.PPARAM && n.Sym() != nil && n.Sym().Name == ".this" {
// wrappers generated by genwrapper need to update
// the .this pointer in place.
// TODO: treat as a PPARAMOUT?
return false
}
- return canSSAType(n.Type)
+ return canSSAType(n.Type())
// TODO: try to make more variables SSAable?
}
// nilCheck generates nil pointer checking code.
// Used only for automatically inserted nil checks,
// not for user code like 'x != nil'.
func (s *state) nilCheck(ptr *ssa.Value) {
- if base.Debug.DisableNil != 0 || s.curfn.Func.NilCheckDisabled() {
+ if base.Debug.DisableNil != 0 || s.curfn.Func().NilCheckDisabled() {
return
}
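// OpNilCheck pins a nil check of ptr at this point in the memory
// ordering; it is lowered later to an architecture-appropriate check.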
s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
}
if needcheck {
// do a size-appropriate check for zero
- cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type), types.Types[types.TBOOL], b, s.zeroVal(n.Type))
+ cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
s.check(cmp, panicdivide)
}
- return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
}
// rtcall issues a call to the given runtime function fn with the listed args.
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
type u322fcvtTab struct {
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
func (s *state) referenceTypeBuiltin(n *ir.Node, x *ssa.Value) *ssa.Value {
- if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
+ if !n.Left().Type().IsMap() && !n.Left().Type().IsChan() {
s.Fatalf("node must be a map or a channel")
}
// if n == nil {
//   return 0
// } else {
//   // len
//   return *((*int)n)
//   // cap
//   return *(((*int)n)+1)
// }
- lenType := n.Type
+ lenType := n.Type()
nilValue := s.constNil(types.Types[types.TUINTPTR])
cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
b := s.endBlock()
b.AddEdgeTo(bElse)
s.startBlock(bElse)
- switch n.Op {
+ switch n.Op() {
case ir.OLEN:
// length is stored in the first word for map/chan
s.vars[n] = s.load(lenType, x)
bElse.AddEdgeTo(bAfter)
s.startBlock(bAfter)
- return s.variable(n, n.Type)
+ return s.variable(n, n.Type())
}
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) {
- iface := s.expr(n.Left) // input interface
- target := s.expr(n.Right) // target type
+ iface := s.expr(n.Left()) // input interface
+ target := s.expr(n.Right()) // target type
byteptr := s.f.Config.Types.BytePtr
- if n.Type.IsInterface() {
- if n.Type.IsEmptyInterface() {
+ if n.Type().IsInterface() {
+ if n.Type().IsEmptyInterface() {
// Converting to an empty interface.
// Input could be an empty or nonempty interface.
if base.Debug.TypeAssert > 0 {
- base.WarnfAt(n.Pos, "type assertion inlined")
+ base.WarnfAt(n.Pos(), "type assertion inlined")
}
// Get itab/type field from input.
// Conversion succeeds iff that field is not nil.
cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
- if n.Left.Type.IsEmptyInterface() && commaok {
+ if n.Left().Type().IsEmptyInterface() && commaok {
// Converting empty interface to empty interface with ,ok is just a nil check.
return iface, cond
}
// On success, return (perhaps modified) input interface.
s.startBlock(bOk)
- if n.Left.Type.IsEmptyInterface() {
+ if n.Left().Type().IsEmptyInterface() {
res = iface // Use input interface unchanged.
return
}
// Load type out of itab, build interface with existing idata.
off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
typ := s.load(byteptr, off)
- idata := s.newValue1(ssa.OpIData, n.Type, iface)
- res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
+ idata := s.newValue1(ssa.OpIData, n.Type(), iface)
+ res = s.newValue2(ssa.OpIMake, n.Type(), typ, idata)
return
}
bOk.AddEdgeTo(bEnd)
bFail.AddEdgeTo(bEnd)
s.startBlock(bEnd)
- idata := s.newValue1(ssa.OpIData, n.Type, iface)
- res = s.newValue2(ssa.OpIMake, n.Type, s.variable(typVar, byteptr), idata)
+ idata := s.newValue1(ssa.OpIData, n.Type(), iface)
+ res = s.newValue2(ssa.OpIMake, n.Type(), s.variable(typVar, byteptr), idata)
resok = cond
delete(s.vars, typVar)
return
}
// converting to a nonempty interface needs a runtime call.
if base.Debug.TypeAssert > 0 {
- base.WarnfAt(n.Pos, "type assertion not inlined")
+ base.WarnfAt(n.Pos(), "type assertion not inlined")
}
- if n.Left.Type.IsEmptyInterface() {
+ if n.Left().Type().IsEmptyInterface() {
if commaok {
- call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[types.TBOOL]}, target, iface)
+ call := s.rtcall(assertE2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
return call[0], call[1]
}
- return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
+ return s.rtcall(assertE2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
}
if commaok {
- call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[types.TBOOL]}, target, iface)
+ call := s.rtcall(assertI2I2, true, []*types.Type{n.Type(), types.Types[types.TBOOL]}, target, iface)
return call[0], call[1]
}
- return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
+ return s.rtcall(assertI2I, true, []*types.Type{n.Type()}, target, iface)[0], nil
}
if base.Debug.TypeAssert > 0 {
- base.WarnfAt(n.Pos, "type assertion inlined")
+ base.WarnfAt(n.Pos(), "type assertion inlined")
}
// Converting to a concrete type.
- direct := isdirectiface(n.Type)
+ direct := isdirectiface(n.Type())
itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
var targetITab *ssa.Value
- if n.Left.Type.IsEmptyInterface() {
+ if n.Left().Type().IsEmptyInterface() {
// Looking for pointer to target type.
targetITab = target
} else {
// Looking for pointer to itab for target type and source interface.
- targetITab = s.expr(n.List.First())
+ targetITab = s.expr(n.List().First())
}
var tmp *ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
- if commaok && !canSSAType(n.Type) {
+ if commaok && !canSSAType(n.Type()) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
- tmp = tempAt(n.Pos, s.curfn, n.Type)
+ tmp = tempAt(n.Pos(), s.curfn, n.Type())
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
addr = s.addr(tmp)
}
if !commaok {
// on failure, panic by calling panicdottype
s.startBlock(bFail)
- taddr := s.expr(n.Right.Right)
- if n.Left.Type.IsEmptyInterface() {
+ taddr := s.expr(n.Right().Right())
+ if n.Left().Type().IsEmptyInterface() {
s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
} else {
s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
// on success, return data from interface
s.startBlock(bOk)
if direct {
- return s.newValue1(ssa.OpIData, n.Type, iface), nil
+ return s.newValue1(ssa.OpIData, n.Type(), iface), nil
}
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- return s.load(n.Type, p), nil
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+ return s.load(n.Type(), p), nil
}
// commaok is the more complicated case because we have
s.startBlock(bOk)
if tmp == nil {
if direct {
- s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
+ s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type(), iface)
} else {
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- s.vars[valVar] = s.load(n.Type, p)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+ s.vars[valVar] = s.load(n.Type(), p)
}
} else {
- p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- s.move(n.Type, addr, p)
+ p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type()), iface)
+ s.move(n.Type(), addr, p)
}
s.vars[okVar] = s.constBool(true)
s.endBlock()
// type assertion failed
s.startBlock(bFail)
if tmp == nil {
- s.vars[valVar] = s.zeroVal(n.Type)
+ s.vars[valVar] = s.zeroVal(n.Type())
} else {
- s.zero(n.Type, addr)
+ s.zero(n.Type(), addr)
}
s.vars[okVar] = s.constBool(false)
s.endBlock()
// merge point
s.startBlock(bEnd)
if tmp == nil {
- res = s.variable(valVar, n.Type)
+ res = s.variable(valVar, n.Type())
delete(s.vars, valVar)
} else {
- res = s.load(n.Type, addr)
+ res = s.load(n.Type(), addr)
s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
}
resok = s.variable(okVar, types.Types[types.TBOOL])
// from being assigned too early. See #14591 and #14762. TODO: allow this.
return
}
- if n.Class() == ir.PAUTO && n.Xoffset != 0 {
- s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
+ if n.Class() == ir.PAUTO && n.Offset() != 0 {
+ s.Fatalf("AUTO var with offset %v %d", n, n.Offset())
}
- loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
+ loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
values, ok := s.f.NamedValues[loc]
if !ok {
s.f.Names = append(s.f.Names, loc)
type byXoffset []*ir.Node
func (s byXoffset) Len() int { return len(s) }
-func (s byXoffset) Less(i, j int) bool { return s[i].Xoffset < s[j].Xoffset }
+func (s byXoffset) Less(i, j int) bool { return s[i].Offset() < s[j].Offset() }
func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
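// emitStackObjects records the addr-taken, liveness-tracked locals of
// the function as stack object metadata, which the runtime uses to find
// pointers in the frame during stack scanning.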
func emitStackObjects(e *ssafn, pp *Progs) {
var vars []*ir.Node
- for _, n := range e.curfn.Func.Dcl {
- if livenessShouldTrack(n) && n.Name.Addrtaken() {
+ for _, n := range e.curfn.Func().Dcl {
+ if livenessShouldTrack(n) && n.Name().Addrtaken() {
vars = append(vars, n)
}
}
// Populate the stack object data.
// Format must match runtime/stack.go:stackObjectRecord.
- x := e.curfn.Func.LSym.Func().StackObjects
+ x := e.curfn.Func().LSym.Func().StackObjects
off := 0
off = duintptr(x, off, uint64(len(vars)))
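// First word: number of objects. Each following record is a
// (frame offset, *_type) pair.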
for _, v := range vars {
// Note: arguments and return values have non-negative Xoffset,
// in which case the offset is relative to argp.
// Locals have a negative Xoffset, in which case the offset is relative to varp.
- off = duintptr(x, off, uint64(v.Xoffset))
- if !typesym(v.Type).Siggen() {
- e.Fatalf(v.Pos, "stack object's type symbol not generated for type %s", v.Type)
+ off = duintptr(x, off, uint64(v.Offset()))
+ if !typesym(v.Type()).Siggen() {
+ e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type())
}
- off = dsymptr(x, off, dtypesym(v.Type), 0)
+ off = dsymptr(x, off, dtypesym(v.Type()), 0)
}
// Emit a funcdata pointing at the stack object data.
if base.Flag.Live != 0 {
for _, v := range vars {
- base.WarnfAt(v.Pos, "stack object %v %s", v, v.Type.String())
+ base.WarnfAt(v.Pos(), "stack object %v %s", v, v.Type().String())
}
}
}
s.livenessMap = liveness(e, f, pp)
emitStackObjects(e, pp)
- openDeferInfo := e.curfn.Func.LSym.Func().OpenCodedDeferInfo
+ openDeferInfo := e.curfn.Func().LSym.Func().OpenCodedDeferInfo
if openDeferInfo != nil {
// This function uses open-coded defers -- write out the funcdata
// info that we computed at the end of genssa.
// some of the inline marks.
// Use this instruction instead.
p.Pos = p.Pos.WithIsStmt() // promote position to a statement
- pp.curfn.Func.LSym.Func().AddInlMark(p, inlMarks[m])
+ pp.curfn.Func().LSym.Func().AddInlMark(p, inlMarks[m])
// Make the inline mark a real nop, so it doesn't generate any code.
m.As = obj.ANOP
m.Pos = src.NoXPos
// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
for _, p := range inlMarkList {
if p.As != obj.ANOP {
- pp.curfn.Func.LSym.Func().AddInlMark(p, inlMarks[p])
+ pp.curfn.Func().LSym.Func().AddInlMark(p, inlMarks[p])
}
}
}
if base.Ctxt.Flag_locationlists {
debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset)
- e.curfn.Func.DebugInfo = debugInfo
+ e.curfn.Func().DebugInfo = debugInfo
bstart := s.bstart
// Note that at this moment, Prog.Pc is a sequence number; it's
// not a real PC until after assembly, so this mapping has to
}
return bstart[b].Pc
case ssa.BlockEnd.ID:
- return e.curfn.Func.LSym.Size
+ return e.curfn.Func().LSym.Size
default:
return valueToProgAfter[v].Pc
}
// Fill in argument and frame size.
pp.Text.To.Type = obj.TYPE_TEXTSIZE
- pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg)))
+ pp.Text.To.Val = int32(Rnd(e.curfn.Type().ArgWidth(), int64(Widthreg)))
pp.Text.To.Offset = frame
// Insert code to zero ambiguously live variables so that the
// garbage collector only sees initialized values when it
// looks for pointers.
var state uint32
// Iterate through declarations. They are sorted in decreasing Xoffset order.
- for _, n := range e.curfn.Func.Dcl {
- if !n.Name.Needzero() {
+ for _, n := range e.curfn.Func().Dcl {
+ if !n.Name().Needzero() {
continue
}
if n.Class() != ir.PAUTO {
- e.Fatalf(n.Pos, "needzero class %d", n.Class())
+ e.Fatalf(n.Pos(), "needzero class %d", n.Class())
}
- if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
- e.Fatalf(n.Pos, "var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
+ if n.Type().Size()%int64(Widthptr) != 0 || n.Offset()%int64(Widthptr) != 0 || n.Type().Size() == 0 {
+ e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset())
}
- if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
+ if lo != hi && n.Offset()+n.Type().Size() >= lo-int64(2*Widthreg) {
// Merge with range we already have.
- lo = n.Xoffset
+ lo = n.Offset()
continue
}
p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
// Set new range.
- lo = n.Xoffset
- hi = lo + n.Type.Size()
+ lo = n.Offset()
+ hi = lo + n.Type().Size()
}
// Zero final range.
case *ir.Node:
if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
- a.Sym = n.Orig.Sym.Linksym()
- a.Offset += n.Xoffset
+ a.Sym = n.Orig().Sym().Linksym()
+ a.Offset += n.Offset()
break
}
a.Name = obj.NAME_AUTO
- a.Sym = n.Sym.Linksym()
- a.Offset += n.Xoffset
+ a.Sym = n.Sym().Linksym()
+ a.Offset += n.Offset()
default:
v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
}
func AddrAuto(a *obj.Addr, v *ssa.Value) {
n, off := AutoVar(v)
a.Type = obj.TYPE_MEM
- a.Sym = n.Sym.Linksym()
+ a.Sym = n.Sym().Linksym()
a.Reg = int16(thearch.REGSP)
- a.Offset = n.Xoffset + off
+ a.Offset = n.Offset() + off
if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
} else {
}
a.Type = obj.TYPE_MEM
a.Name = obj.NAME_AUTO
- a.Sym = s.ScratchFpMem.Sym.Linksym()
+ a.Sym = s.ScratchFpMem.Sym().Linksym()
a.Reg = int16(thearch.REGSP)
- a.Offset = s.ScratchFpMem.Xoffset
+ a.Offset = s.ScratchFpMem.Offset()
}
// Call returns a new CALL instruction for the SSA value v.
// fieldIdx finds the index of the field referred to by the ODOT node n.
func fieldIdx(n *ir.Node) int {
- t := n.Left.Type
- f := n.Sym
+ t := n.Left().Type()
+ f := n.Sym()
if !t.IsStruct() {
panic("ODOT's LHS is not a struct")
}
i++
continue
}
- if t1.Offset != n.Xoffset {
+ if t1.Offset != n.Offset() {
panic("field offset doesn't match")
}
return i
if e.strings == nil {
e.strings = make(map[string]*obj.LSym)
}
- data := stringsym(e.curfn.Pos, s)
+ data := stringsym(e.curfn.Pos(), s)
e.strings[s] = data
return data
}
t := types.NewPtr(types.Types[types.TUINT8])
// Split this interface up into two separate variables.
f := ".itab"
- if n.Type.IsEmptyInterface() {
+ if n.Type().IsEmptyInterface() {
f = ".type"
}
c := e.SplitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
n := name.N
at := name.Type
if at.NumElem() != 1 {
- e.Fatalf(n.Pos, "bad array size")
+ e.Fatalf(n.Pos(), "bad array size")
}
et := at.Elem()
return e.SplitSlot(&name, "[0]", 0, et)
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
node := parent.N
- if node.Class() != ir.PAUTO || node.Name.Addrtaken() {
+ if node.Class() != ir.PAUTO || node.Name().Addrtaken() {
// addressed things and non-autos retain their parents (i.e., cannot truly be split)
return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
}
- s := &types.Sym{Name: node.Sym.Name + suffix, Pkg: ir.LocalPkg}
- n := ir.NewNameAt(parent.N.Pos, s)
+ s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: ir.LocalPkg}
+ n := ir.NewNameAt(parent.N.Pos(), s)
s.Def = ir.AsTypesNode(n)
- ir.AsNode(s.Def).Name.SetUsed(true)
- n.Type = t
+ ir.AsNode(s.Def).Name().SetUsed(true)
+ n.SetType(t)
n.SetClass(ir.PAUTO)
- n.Esc = EscNever
- n.Name.Curfn = e.curfn
- e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
+ n.SetEsc(EscNever)
+ n.Name().Curfn = e.curfn
+ e.curfn.Func().Dcl = append(e.curfn.Func().Dcl, n)
dowidth(t)
return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
}
}
func (e *ssafn) SetWBPos(pos src.XPos) {
- e.curfn.Func.SetWBPos(pos)
+ e.curfn.Func().SetWBPos(pos)
}
func (e *ssafn) MyImportPath() string {
}
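// clobberBase returns the base variable of an lvalue that an assignment
// entirely overwrites: it peels single-field struct selectors and
// single-element array indexing, since writing that member clobbers the
// whole object.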
func clobberBase(n *ir.Node) *ir.Node {
- if n.Op == ir.ODOT && n.Left.Type.NumFields() == 1 {
- return clobberBase(n.Left)
+ if n.Op() == ir.ODOT && n.Left().Type().NumFields() == 1 {
+ return clobberBase(n.Left())
}
- if n.Op == ir.OINDEX && n.Left.Type.IsArray() && n.Left.Type.NumElem() == 1 {
- return clobberBase(n.Left)
+ if n.Op() == ir.OINDEX && n.Left().Type().IsArray() && n.Left().Type().NumElem() == 1 {
+ return clobberBase(n.Left())
}
return n
}
// hasUniquePos reports whether n has a unique position that can be
// used for reporting error messages.
//
// It's primarily used to distinguish references to named objects,
// whose Pos will point back to their declaration position rather than
// their usage position.
func hasUniquePos(n *ir.Node) bool {
- switch n.Op {
+ switch n.Op() {
case ir.ONAME, ir.OPACK:
return false
case ir.OLITERAL, ir.ONIL, ir.OTYPE:
- if n.Sym != nil {
+ if n.Sym() != nil {
return false
}
}
- if !n.Pos.IsKnown() {
+ if !n.Pos().IsKnown() {
if base.Flag.K != 0 {
base.Warn("setlineno: unknown position (line 0)")
}
func setlineno(n *ir.Node) src.XPos {
lno := base.Pos
if n != nil && hasUniquePos(n) {
- base.Pos = n.Pos
+ base.Pos = n.Pos()
}
return lno
}
fn := Curfn
if Curfn == nil {
base.Fatalf("autolabel outside function")
}
- n := fn.Func.Label
- fn.Func.Label++
+ n := fn.Func().Label
+ fn.Func().Label++
return lookupN(prefix, int(n))
}
s1.Def = s.Def
s1.Block = s.Block
- if ir.AsNode(s1.Def).Name == nil {
+ if ir.AsNode(s1.Def).Name() == nil {
ir.Dump("s1def", ir.AsNode(s1.Def))
base.Fatalf("missing Name")
}
- ir.AsNode(s1.Def).Name.Pack = pack
+ ir.AsNode(s1.Def).Name().Pack = pack
s1.Origpkg = opkg
n++
}
if n == 0 {
// can't possibly be used - there were no symbols
- base.ErrorfAt(pack.Pos, "imported and not used: %q", opkg.Path)
+ base.ErrorfAt(pack.Pos(), "imported and not used: %q", opkg.Path)
}
}
// newname returns a new ONAME Node associated with symbol s.
func NewName(s *types.Sym) *ir.Node {
n := ir.NewNameAt(base.Pos, s)
- n.Name.Curfn = Curfn
+ n.Name().Curfn = Curfn
return n
}
// and the Sym field set to sym. This is for ODOT and friends.
func nodlSym(pos src.XPos, op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node {
n := ir.NodAt(pos, op, left, nil)
- n.Sym = sym
+ n.SetSym(sym)
return n
}
func nodnil() *ir.Node {
n := ir.Nod(ir.ONIL, nil, nil)
- n.Type = types.Types[types.TNIL]
+ n.SetType(types.Types[types.TNIL])
return n
}
return nil
}
- switch n.Op {
+ switch n.Op() {
default:
m := ir.SepCopy(n)
- m.Left = treecopy(n.Left, pos)
- m.Right = treecopy(n.Right, pos)
- m.List.Set(listtreecopy(n.List.Slice(), pos))
+ m.SetLeft(treecopy(n.Left(), pos))
+ m.SetRight(treecopy(n.Right(), pos))
+ m.PtrList().Set(listtreecopy(n.List().Slice(), pos))
if pos.IsKnown() {
- m.Pos = pos
+ m.SetPos(pos)
}
- if m.Name != nil && n.Op != ir.ODCLFIELD {
+ if m.Name() != nil && n.Op() != ir.ODCLFIELD {
ir.Dump("treecopy", n)
base.Fatalf("treecopy Name")
}
// Convert node n for assignment to type t.
func assignconvfn(n *ir.Node, t *types.Type, context func() string) *ir.Node {
- if n == nil || n.Type == nil || n.Type.Broke() {
+ if n == nil || n.Type() == nil || n.Type().Broke() {
return n
}
- if t.Etype == types.TBLANK && n.Type.Etype == types.TNIL {
+ if t.Etype == types.TBLANK && n.Type().Etype == types.TNIL {
base.Errorf("use of untyped nil")
}
n = convlit1(n, t, false, context)
- if n.Type == nil {
+ if n.Type() == nil {
return n
}
if t.Etype == types.TBLANK {
return n
}
// Convert ideal bool from comparison to plain bool
// if the next step is non-bool (like interface{}).
- if n.Type == types.UntypedBool && !t.IsBoolean() {
- if n.Op == ir.ONAME || n.Op == ir.OLITERAL {
+ if n.Type() == types.UntypedBool && !t.IsBoolean() {
+ if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL {
r := ir.Nod(ir.OCONVNOP, n, nil)
- r.Type = types.Types[types.TBOOL]
+ r.SetType(types.Types[types.TBOOL])
r.SetTypecheck(1)
r.SetImplicit(true)
n = r
}
}
- if types.Identical(n.Type, t) {
+ if types.Identical(n.Type(), t) {
return n
}
- op, why := assignop(n.Type, t)
+ op, why := assignop(n.Type(), t)
if op == ir.OXXX {
base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why)
op = ir.OCONV
}
r := ir.Nod(op, n, nil)
- r.Type = t
+ r.SetType(t)
r.SetTypecheck(1)
r.SetImplicit(true)
- r.Orig = n.Orig
+ r.SetOrig(n.Orig())
return r
}
base.Fatalf("backingArrayPtrLen not cheap: %v", n)
}
ptr = ir.Nod(ir.OSPTR, n, nil)
- if n.Type.IsString() {
- ptr.Type = types.Types[types.TUINT8].PtrTo()
+ if n.Type().IsString() {
+ ptr.SetType(types.Types[types.TUINT8].PtrTo())
} else {
- ptr.Type = n.Type.Elem().PtrTo()
+ ptr.SetType(n.Type().Elem().PtrTo())
}
len = ir.Nod(ir.OLEN, n, nil)
- len.Type = types.Types[types.TINT]
+ len.SetType(types.Types[types.TINT])
return ptr, len
}
// labeledControl returns the control flow Node (for, switch, select)
// associated with the label n, if any.
func labeledControl(n *ir.Node) *ir.Node {
- if n.Op != ir.OLABEL {
- base.Fatalf("labeledControl %v", n.Op)
+ if n.Op() != ir.OLABEL {
+ base.Fatalf("labeledControl %v", n.Op())
}
- ctl := n.Name.Defn
+ ctl := n.Name().Defn
if ctl == nil {
return nil
}
- switch ctl.Op {
+ switch ctl.Op() {
case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT:
return ctl
}
return nil
}
func calcHasCall(n *ir.Node) bool {
- if n.Ninit.Len() != 0 {
+ if n.Init().Len() != 0 {
// TODO(mdempsky): This seems overly conservative.
return true
}
- switch n.Op {
+ switch n.Op() {
case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OTYPE:
if n.HasCall() {
base.Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n)
// When using soft-float, these ops might be rewritten to function calls
// so we ensure they are evaluated first.
case ir.OADD, ir.OSUB, ir.ONEG, ir.OMUL:
- if thearch.SoftFloat && (isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) {
+ if thearch.SoftFloat && (isFloat[n.Type().Etype] || isComplex[n.Type().Etype]) {
return true
}
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
- if thearch.SoftFloat && (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype]) {
+ if thearch.SoftFloat && (isFloat[n.Left().Type().Etype] || isComplex[n.Left().Type().Etype]) {
return true
}
case ir.OCONV:
- if thearch.SoftFloat && ((isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) || (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype])) {
+ if thearch.SoftFloat && ((isFloat[n.Type().Etype] || isComplex[n.Type().Etype]) || (isFloat[n.Left().Type().Etype] || isComplex[n.Left().Type().Etype])) {
return true
}
}
- if n.Left != nil && n.Left.HasCall() {
+ if n.Left() != nil && n.Left().HasCall() {
return true
}
- if n.Right != nil && n.Right.HasCall() {
+ if n.Right() != nil && n.Right().HasCall() {
return true
}
return false
return nil
}
- if n.Ninit.Len() != 0 {
- walkstmtlist(n.Ninit.Slice())
- init.AppendNodes(&n.Ninit)
+ if n.Init().Len() != 0 {
+ walkstmtlist(n.Init().Slice())
+ init.AppendNodes(n.PtrInit())
}
- switch n.Op {
+ switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
case ir.ODOT, ir.OLEN, ir.OCAP:
- l := safeexpr(n.Left, init)
- if l == n.Left {
+ l := safeexpr(n.Left(), init)
+ if l == n.Left() {
return n
}
r := ir.Copy(n)
- r.Left = l
+ r.SetLeft(l)
r = typecheck(r, ctxExpr)
r = walkexpr(r, init)
return r
case ir.ODOTPTR, ir.ODEREF:
- l := safeexpr(n.Left, init)
- if l == n.Left {
+ l := safeexpr(n.Left(), init)
+ if l == n.Left() {
return n
}
a := ir.Copy(n)
- a.Left = l
+ a.SetLeft(l)
a = walkexpr(a, init)
return a
case ir.OINDEX, ir.OINDEXMAP:
- l := safeexpr(n.Left, init)
- r := safeexpr(n.Right, init)
- if l == n.Left && r == n.Right {
+ l := safeexpr(n.Left(), init)
+ r := safeexpr(n.Right(), init)
+ if l == n.Left() && r == n.Right() {
return n
}
a := ir.Copy(n)
- a.Left = l
- a.Right = r
+ a.SetLeft(l)
+ a.SetRight(r)
a = walkexpr(a, init)
return a
// return side-effect free and cheap n, appending side effects to init.
// result may not be assignable.
func cheapexpr(n *ir.Node, init *ir.Nodes) *ir.Node {
- switch n.Op {
+ switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
}
- return copyexpr(n, n.Type, init)
+ return copyexpr(n, n.Type(), init)
}
// Code to resolve elided DOTs in embedded types.
// in T.field,
// find missing fields that
// will give shortest unique addressing.
// modify the tree with missing type names.
func adddot(n *ir.Node) *ir.Node {
- n.Left = typecheck(n.Left, ctxType|ctxExpr)
- if n.Left.Diag() {
+ n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
+ if n.Left().Diag() {
n.SetDiag(true)
}
- t := n.Left.Type
+ t := n.Left().Type()
if t == nil {
return n
}
- if n.Left.Op == ir.OTYPE {
+ if n.Left().Op() == ir.OTYPE {
return n
}
- s := n.Sym
+ s := n.Sym()
if s == nil {
return n
}
case path != nil:
// rebuild elided dots
for c := len(path) - 1; c >= 0; c-- {
- n.Left = nodSym(ir.ODOT, n.Left, path[c].field.Sym)
- n.Left.SetImplicit(true)
+ n.SetLeft(nodSym(ir.ODOT, n.Left(), path[c].field.Sym))
+ n.Left().SetImplicit(true)
}
case ambig:
base.Errorf("ambiguous selector %v", n)
- n.Left = nil
+ n.SetLeft(nil)
}
return n
gen++
}
a := symfield(s, t.Type)
- a.Pos = t.Pos
+ a.SetPos(t.Pos)
a.SetIsDDD(t.IsDDD())
args = append(args, a)
}
dclcontext = ir.PEXTERN
tfn := ir.Nod(ir.OTFUNC, nil, nil)
- tfn.Left = namedfield(".this", rcvr)
- tfn.List.Set(structargs(method.Type.Params(), true))
- tfn.Rlist.Set(structargs(method.Type.Results(), false))
+ tfn.SetLeft(namedfield(".this", rcvr))
+ tfn.PtrList().Set(structargs(method.Type.Params(), true))
+ tfn.PtrRlist().Set(structargs(method.Type.Results(), false))
fn := dclfunc(newnam, tfn)
- fn.Func.SetDupok(true)
+ fn.Func().SetDupok(true)
- nthis := ir.AsNode(tfn.Type.Recv().Nname)
+ nthis := ir.AsNode(tfn.Type().Recv().Nname)
methodrcvr := method.Type.Recv().Type
if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
// generating wrapper from *T to T.
n := ir.Nod(ir.OIF, nil, nil)
- n.Left = ir.Nod(ir.OEQ, nthis, nodnil())
+ n.SetLeft(ir.Nod(ir.OEQ, nthis, nodnil()))
call := ir.Nod(ir.OCALL, syslook("panicwrap"), nil)
- n.Nbody.Set1(call)
- fn.Nbody.Append(n)
+ n.PtrBody().Set1(call)
+ fn.PtrBody().Append(n)
}
dot := adddot(nodSym(ir.OXDOT, nthis, method.Sym))
// value for that function.
if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
// generate tail call: adjust pointer receiver and jump to embedded method.
- dot = dot.Left // skip final .M
+ dot = dot.Left() // skip final .M
// TODO(mdempsky): Remove dependency on dotlist.
if !dotlist[0].field.Type.IsPtr() {
dot = ir.Nod(ir.OADDR, dot, nil)
}
as := ir.Nod(ir.OAS, nthis, convnop(dot, rcvr))
- fn.Nbody.Append(as)
- fn.Nbody.Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym)))
+ fn.PtrBody().Append(as)
+ fn.PtrBody().Append(nodSym(ir.ORETJMP, nil, methodSym(methodrcvr, method.Sym)))
} else {
- fn.Func.SetWrapper(true) // ignore frame for panic+recover matching
+ fn.Func().SetWrapper(true) // ignore frame for panic+recover matching
call := ir.Nod(ir.OCALL, dot, nil)
- call.List.Set(paramNnames(tfn.Type))
- call.SetIsDDD(tfn.Type.IsVariadic())
+ call.PtrList().Set(paramNnames(tfn.Type()))
+ call.SetIsDDD(tfn.Type().IsVariadic())
if method.Type.NumResults() > 0 {
n := ir.Nod(ir.ORETURN, nil, nil)
- n.List.Set1(call)
+ n.PtrList().Set1(call)
call = n
}
- fn.Nbody.Append(call)
+ fn.PtrBody().Append(call)
}
if false && base.Flag.LowerR != 0 {
- ir.DumpList("genwrapper body", fn.Nbody)
+ ir.DumpList("genwrapper body", fn.Body())
}
funcbody()
fn = typecheck(fn, ctxStmt)
Curfn = fn
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
// Inline calls within (*T).M wrappers. This is safe because we only
// generate those wrappers within the same compilation unit as (T).M.
n := NewName(sym)
setNodeNameFunc(n)
- n.Type = functype(nil, []*ir.Node{
+ n.SetType(functype(nil, []*ir.Node{
anonfield(types.NewPtr(t)),
anonfield(types.Types[types.TUINTPTR]),
anonfield(types.Types[types.TUINTPTR]),
}, []*ir.Node{
anonfield(types.Types[types.TUINTPTR]),
- })
+ }))
return n
}
func liststmt(l []*ir.Node) *ir.Node {
n := ir.Nod(ir.OBLOCK, nil, nil)
- n.List.Set(l)
+ n.PtrList().Set(l)
if len(l) != 0 {
- n.Pos = l[0].Pos
+ n.SetPos(l[0].Pos())
}
return n
}
func ngotype(n *ir.Node) *types.Sym {
- if n.Type != nil {
- return typenamesym(n.Type)
+ if n.Type() != nil {
+ return typenamesym(n.Type())
}
return nil
}
if ir.MayBeShared(n) {
// Introduce OCONVNOP to hold init list.
n = ir.Nod(ir.OCONVNOP, n, nil)
- n.Type = n.Left.Type
+ n.SetType(n.Left().Type())
n.SetTypecheck(1)
}
- n.Ninit.Prepend(init...)
+ n.PtrInit().Prepend(init...)
n.SetHasCall(true)
return n
}
// itabType loads the _type field from a runtime.itab struct.
func itabType(itab *ir.Node) *ir.Node {
typ := nodSym(ir.ODOTPTR, itab, nil)
- typ.Type = types.NewPtr(types.Types[types.TUINT8])
+ typ.SetType(types.NewPtr(types.Types[types.TUINT8]))
typ.SetTypecheck(1)
- typ.Xoffset = int64(Widthptr) // offset of _type in runtime.itab
- typ.SetBounded(true) // guaranteed not to fault
+ typ.SetOffset(int64(Widthptr)) // offset of _type in runtime.itab
+ typ.SetBounded(true) // guaranteed not to fault
return typ
}
}
ptr := nodlSym(pos, ir.OIDATA, n, nil)
if isdirectiface(t) {
- ptr.Type = t
+ ptr.SetType(t)
ptr.SetTypecheck(1)
return ptr
}
- ptr.Type = types.NewPtr(t)
+ ptr.SetType(types.NewPtr(t))
ptr.SetTypecheck(1)
ind := ir.NodAt(pos, ir.ODEREF, ptr, nil)
- ind.Type = t
+ ind.SetType(t)
ind.SetTypecheck(1)
ind.SetBounded(true)
return ind
// This is where t was declared or where it appeared as a type expression.
func typePos(t *types.Type) src.XPos {
n := ir.AsNode(t.Nod)
- if n == nil || !n.Pos.IsKnown() {
+ if n == nil || !n.Pos().IsKnown() {
base.Fatalf("bad type: %v", t)
}
- return n.Pos
+ return n.Pos()
}
// typecheckswitch typechecks a switch statement.
func typecheckswitch(n *ir.Node) {
- typecheckslice(n.Ninit.Slice(), ctxStmt)
- if n.Left != nil && n.Left.Op == ir.OTYPESW {
+ typecheckslice(n.Init().Slice(), ctxStmt)
+ if n.Left() != nil && n.Left().Op() == ir.OTYPESW {
typecheckTypeSwitch(n)
} else {
typecheckExprSwitch(n)
}
func typecheckTypeSwitch(n *ir.Node) {
- n.Left.Right = typecheck(n.Left.Right, ctxExpr)
- t := n.Left.Right.Type
+ n.Left().SetRight(typecheck(n.Left().Right(), ctxExpr))
+ t := n.Left().Right().Type()
if t != nil && !t.IsInterface() {
- base.ErrorfAt(n.Pos, "cannot type switch on non-interface value %L", n.Left.Right)
+ base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", n.Left().Right())
t = nil
}
// We don't actually declare the type switch's guarded
// declaration itself. So if there are no cases, we won't
// notice that it went unused.
- if v := n.Left.Left; v != nil && !ir.IsBlank(v) && n.List.Len() == 0 {
- base.ErrorfAt(v.Pos, "%v declared but not used", v.Sym)
+ if v := n.Left().Left(); v != nil && !ir.IsBlank(v) && n.List().Len() == 0 {
+ base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym())
}
var defCase, nilCase *ir.Node
var ts typeSet
- for _, ncase := range n.List.Slice() {
- ls := ncase.List.Slice()
+ for _, ncase := range n.List().Slice() {
+ ls := ncase.List().Slice()
if len(ls) == 0 { // default:
if defCase != nil {
- base.ErrorfAt(ncase.Pos, "multiple defaults in switch (first at %v)", ir.Line(defCase))
+ base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
} else {
defCase = ncase
}
for i := range ls {
ls[i] = typecheck(ls[i], ctxExpr|ctxType)
n1 := ls[i]
- if t == nil || n1.Type == nil {
+ if t == nil || n1.Type() == nil {
continue
}
switch {
case ir.IsNil(n1): // case nil:
if nilCase != nil {
- base.ErrorfAt(ncase.Pos, "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
+ base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
} else {
nilCase = ncase
}
- case n1.Op != ir.OTYPE:
- base.ErrorfAt(ncase.Pos, "%L is not a type", n1)
- case !n1.Type.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr) && !missing.Broke():
+ case n1.Op() != ir.OTYPE:
+ base.ErrorfAt(ncase.Pos(), "%L is not a type", n1)
+ case !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke():
if have != nil && !have.Broke() {
- base.ErrorfAt(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
- " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left.Right, n1.Type, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+ " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left().Right(), n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else if ptr != 0 {
- base.ErrorfAt(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
- " (%v method has pointer receiver)", n.Left.Right, n1.Type, missing.Sym)
+ base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+ " (%v method has pointer receiver)", n.Left().Right(), n1.Type(), missing.Sym)
} else {
- base.ErrorfAt(ncase.Pos, "impossible type switch case: %L cannot have dynamic type %v"+
- " (missing %v method)", n.Left.Right, n1.Type, missing.Sym)
+ base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+ " (missing %v method)", n.Left().Right(), n1.Type(), missing.Sym)
}
}
- if n1.Op == ir.OTYPE {
- ts.add(ncase.Pos, n1.Type)
+ if n1.Op() == ir.OTYPE {
+ ts.add(ncase.Pos(), n1.Type())
}
}
- if ncase.Rlist.Len() != 0 {
+ if ncase.Rlist().Len() != 0 {
// Assign the clause variable's type.
vt := t
if len(ls) == 1 {
- if ls[0].Op == ir.OTYPE {
- vt = ls[0].Type
+ if ls[0].Op() == ir.OTYPE {
+ vt = ls[0].Type()
} else if !ir.IsNil(ls[0]) {
// Invalid single-type case;
// mark variable as broken.
vt = nil
}
}
- nvar := ncase.Rlist.First()
- nvar.Type = vt
+ nvar := ncase.Rlist().First()
+ nvar.SetType(vt)
if vt != nil {
nvar = typecheck(nvar, ctxExpr|ctxAssign)
} else {
nvar.SetTypecheck(1)
nvar.SetWalkdef(1)
}
- ncase.Rlist.SetFirst(nvar)
+ ncase.Rlist().SetFirst(nvar)
}
- typecheckslice(ncase.Nbody.Slice(), ctxStmt)
+ typecheckslice(ncase.Body().Slice(), ctxStmt)
}
}
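
The clause-variable typing above, where vt is the case's concrete type only for single-type cases and otherwise stays the switch operand's interface type, matches what the language spec requires and is easy to observe directly:

    package main

    import "fmt"

    func main() {
        var v interface{} = 42
        switch x := v.(type) {
        case int: // single-type case: x has type int here
            fmt.Println(x + 1)
        case string, bool: // multi-type case: x keeps type interface{}
            fmt.Printf("%T\n", x)
        default: // default case: x also keeps type interface{}
            fmt.Printf("%T\n", x)
        }
    }
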
func typecheckExprSwitch(n *ir.Node) {
t := types.Types[types.TBOOL]
- if n.Left != nil {
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- t = n.Left.Type
+ if n.Left() != nil {
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ t = n.Left().Type()
}
var nilonly string
case !IsComparable(t):
if t.IsStruct() {
- base.ErrorfAt(n.Pos, "cannot switch on %L (struct containing %v cannot be compared)", n.Left, IncomparableField(t).Type)
+ base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Left(), IncomparableField(t).Type)
} else {
- base.ErrorfAt(n.Pos, "cannot switch on %L", n.Left)
+ base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Left())
}
t = nil
}
var defCase *ir.Node
var cs constSet
- for _, ncase := range n.List.Slice() {
- ls := ncase.List.Slice()
+ for _, ncase := range n.List().Slice() {
+ ls := ncase.List().Slice()
if len(ls) == 0 { // default:
if defCase != nil {
- base.ErrorfAt(ncase.Pos, "multiple defaults in switch (first at %v)", ir.Line(defCase))
+ base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
} else {
defCase = ncase
}
ls[i] = typecheck(ls[i], ctxExpr)
ls[i] = defaultlit(ls[i], t)
n1 := ls[i]
- if t == nil || n1.Type == nil {
+ if t == nil || n1.Type() == nil {
continue
}
if nilonly != "" && !ir.IsNil(n1) {
- base.ErrorfAt(ncase.Pos, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
- } else if t.IsInterface() && !n1.Type.IsInterface() && !IsComparable(n1.Type) {
- base.ErrorfAt(ncase.Pos, "invalid case %L in switch (incomparable type)", n1)
+ base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left())
+ } else if t.IsInterface() && !n1.Type().IsInterface() && !IsComparable(n1.Type()) {
+ base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1)
} else {
- op1, _ := assignop(n1.Type, t)
- op2, _ := assignop(t, n1.Type)
+ op1, _ := assignop(n1.Type(), t)
+ op2, _ := assignop(t, n1.Type())
if op1 == ir.OXXX && op2 == ir.OXXX {
- if n.Left != nil {
- base.ErrorfAt(ncase.Pos, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
+ if n.Left() != nil {
+ base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left(), n1.Type(), t)
} else {
- base.ErrorfAt(ncase.Pos, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
+ base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type())
}
}
}
// Don't check for duplicate bools. Although the spec allows it,
// (1) the compiler hasn't checked it in the past, so compatibility mandates it, and
// (2) it would disallow useful things like
//       case GOARCH == "arm" && GOARM == "5":
//       case GOARCH == "arm":
//     which would both evaluate to false for non-ARM compiles.
- if !n1.Type.IsBoolean() {
- cs.add(ncase.Pos, n1, "case", "switch")
+ if !n1.Type().IsBoolean() {
+ cs.add(ncase.Pos(), n1, "case", "switch")
}
}
- typecheckslice(ncase.Nbody.Slice(), ctxStmt)
+ typecheckslice(ncase.Body().Slice(), ctxStmt)
}
}
// walkswitch walks a switch statement.
func walkswitch(sw *ir.Node) {
// Guard against double walk, see #25776.
- if sw.List.Len() == 0 && sw.Nbody.Len() > 0 {
+ if sw.List().Len() == 0 && sw.Body().Len() > 0 {
return // Was fatal, but eliminating every possible source of double-walking is hard
}
- if sw.Left != nil && sw.Left.Op == ir.OTYPESW {
+ if sw.Left() != nil && sw.Left().Op() == ir.OTYPESW {
walkTypeSwitch(sw)
} else {
walkExprSwitch(sw)
func walkExprSwitch(sw *ir.Node) {
lno := setlineno(sw)
- cond := sw.Left
- sw.Left = nil
+ cond := sw.Left()
+ sw.SetLeft(nil)
// convert switch {...} to switch true {...}
if cond == nil {
// Given "switch string(byteslice)",
// with all cases being side-effect free,
// use a zero-cost alias of the byte slice.
// Do this before calling walkexpr on cond,
// because walkexpr will lower the string
// conversion into a runtime call.
// See issue 24937 for more discussion.
- if cond.Op == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
- cond.Op = ir.OBYTES2STRTMP
+ if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
+ cond.SetOp(ir.OBYTES2STRTMP)
}
- cond = walkexpr(cond, &sw.Ninit)
- if cond.Op != ir.OLITERAL && cond.Op != ir.ONIL {
- cond = copyexpr(cond, cond.Type, &sw.Nbody)
+ cond = walkexpr(cond, sw.PtrInit())
+ if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL {
+ cond = copyexpr(cond, cond.Type(), sw.PtrBody())
}
base.Pos = lno
var defaultGoto *ir.Node
var body ir.Nodes
- for _, ncase := range sw.List.Slice() {
+ for _, ncase := range sw.List().Slice() {
label := autolabel(".s")
- jmp := npos(ncase.Pos, nodSym(ir.OGOTO, nil, label))
+ jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
// Process case dispatch.
- if ncase.List.Len() == 0 {
+ if ncase.List().Len() == 0 {
if defaultGoto != nil {
base.Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
- for _, n1 := range ncase.List.Slice() {
- s.Add(ncase.Pos, n1, jmp)
+ for _, n1 := range ncase.List().Slice() {
+ s.Add(ncase.Pos(), n1, jmp)
}
// Process body.
- body.Append(npos(ncase.Pos, nodSym(ir.OLABEL, nil, label)))
- body.Append(ncase.Nbody.Slice()...)
- if fall, pos := hasFall(ncase.Nbody.Slice()); !fall {
+ body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
+ body.Append(ncase.Body().Slice()...)
+ if fall, pos := hasFall(ncase.Body().Slice()); !fall {
br := ir.Nod(ir.OBREAK, nil, nil)
- br.Pos = pos
+ br.SetPos(pos)
body.Append(br)
}
}
- sw.List.Set(nil)
+ sw.PtrList().Set(nil)
if defaultGoto == nil {
br := ir.Nod(ir.OBREAK, nil, nil)
- br.Pos = br.Pos.WithNotStmt()
+ br.SetPos(br.Pos().WithNotStmt())
defaultGoto = br
}
- s.Emit(&sw.Nbody)
- sw.Nbody.Append(defaultGoto)
- sw.Nbody.AppendNodes(&body)
- walkstmtlist(sw.Nbody.Slice())
+ s.Emit(sw.PtrBody())
+ sw.PtrBody().Append(defaultGoto)
+ sw.PtrBody().AppendNodes(&body)
+ walkstmtlist(sw.Body().Slice())
}
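
walkExprSwitch's output is a flat dispatch-then-bodies form: each case gets an autolabel, the dispatch section jumps to it, and any body without a trailing fallthrough gets an implicit break appended. A hand-written approximation of that shape (not actual compiler output; a return stands in for the emitted OBREAK):

    package main

    import "fmt"

    func f() { fmt.Println("a") }
    func g() { fmt.Println("b") }
    func h() { fmt.Println("default") }

    // lowered approximates what the walker emits for:
    //
    //     switch s {
    //     case "a": f()
    //     case "b": g()
    //     default:  h()
    //     }
    func lowered(s string) {
        // Dispatch: one OGOTO per case, default last.
        if s == "a" {
            goto s1
        }
        if s == "b" {
            goto s2
        }
        goto s3

        // Bodies: OLABEL, the case body, then the implicit break
        // appended when the body has no trailing fallthrough.
    s1:
        f()
        return
    s2:
        g()
        return
    s3:
        h()
    }

    func main() {
        lowered("b")
    }
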
// An exprSwitch walks an expression switch.
func (s *exprSwitch) Add(pos src.XPos, expr, jmp *ir.Node) {
c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
- if okforcmp[s.exprname.Type.Etype] && expr.Op == ir.OLITERAL {
+ if okforcmp[s.exprname.Type().Etype] && expr.Op() == ir.OLITERAL {
s.clauses = append(s.clauses, c)
return
}
// Caution: If len(cc) == 1, then cc[0] might not be an OLITERAL.
// The code below is structured to implicitly handle this case
// (e.g., sort.Slice doesn't need to invoke the less function
// when there's only a single slice element).
- if s.exprname.Type.IsString() && len(cc) >= 2 {
+ if s.exprname.Type().IsString() && len(cc) >= 2 {
// Sort strings by length and then by value. It is
// much cheaper to compare lengths than values, and
// all we need here is consistency. We respect this
// sorting below.
},
func(i int, nif *ir.Node) {
run := runs[i]
- nif.Left = ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run)))
- s.search(run, &nif.Nbody)
+ nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run))))
+ s.search(run, nif.PtrBody())
},
)
return
})
// Merge consecutive integer cases.
- if s.exprname.Type.IsInteger() {
+ if s.exprname.Type().IsInteger() {
merged := cc[:1]
for _, c := range cc[1:] {
last := &merged[len(merged)-1]
},
func(i int, nif *ir.Node) {
c := &cc[i]
- nif.Left = c.test(s.exprname)
- nif.Nbody.Set1(c.jmp)
+ nif.SetLeft(c.test(s.exprname))
+ nif.PtrBody().Set1(c.jmp)
},
)
}
}
// Optimize "switch true { ...}" and "switch false { ... }".
- if ir.IsConst(exprname, constant.Bool) && !c.lo.Type.IsInterface() {
+ if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() {
if exprname.BoolVal() {
return c.lo
} else {
// Restricting to constants is simple and probably powerful
// enough.
- for _, ncase := range sw.List.Slice() {
- if ncase.Op != ir.OCASE {
- base.Fatalf("switch string(byteslice) bad op: %v", ncase.Op)
+ for _, ncase := range sw.List().Slice() {
+ if ncase.Op() != ir.OCASE {
+ base.Fatalf("switch string(byteslice) bad op: %v", ncase.Op())
}
- for _, v := range ncase.List.Slice() {
- if v.Op != ir.OLITERAL {
+ for _, v := range ncase.List().Slice() {
+ if v.Op() != ir.OLITERAL {
return false
}
}
// Search backwards for the index of the fallthrough
// statement. Do not assume it'll be in the last
// position, since in some cases (e.g. when the statement
// list contains autotmp_ variables), one or more OVARKILL
// nodes will be at the end of the list.
i := len(stmts) - 1
- for i >= 0 && stmts[i].Op == ir.OVARKILL {
+ for i >= 0 && stmts[i].Op() == ir.OVARKILL {
i--
}
if i < 0 {
return false, src.NoXPos
}
- return stmts[i].Op == ir.OFALL, stmts[i].Pos
+ return stmts[i].Op() == ir.OFALL, stmts[i].Pos()
}
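
The backward scan above skips compiler-inserted OVARKILL markers, which can trail the user's last written statement, before testing for OFALL. The same scan over stand-in opcodes, as a standalone sketch:

    package main

    import "fmt"

    type op int

    const (
        opOther op = iota
        opVarKill // stand-in for ir.OVARKILL
        opFall    // stand-in for ir.OFALL
    )

    // hasFall walks backwards past trailing variable-kill markers and
    // reports whether the last remaining statement is a fallthrough.
    func hasFall(stmts []op) bool {
        i := len(stmts) - 1
        for i >= 0 && stmts[i] == opVarKill {
            i--
        }
        return i >= 0 && stmts[i] == opFall
    }

    func main() {
        fmt.Println(hasFall([]op{opOther, opFall, opVarKill})) // true
        fmt.Println(hasFall([]op{opOther}))                    // false
    }
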
// walkTypeSwitch generates an AST that implements sw, where sw is a
// type switch.
func walkTypeSwitch(sw *ir.Node) {
var s typeSwitch
- s.facename = sw.Left.Right
- sw.Left = nil
+ s.facename = sw.Left().Right()
+ sw.SetLeft(nil)
- s.facename = walkexpr(s.facename, &sw.Ninit)
- s.facename = copyexpr(s.facename, s.facename.Type, &sw.Nbody)
+ s.facename = walkexpr(s.facename, sw.PtrInit())
+ s.facename = copyexpr(s.facename, s.facename.Type(), sw.PtrBody())
s.okname = temp(types.Types[types.TBOOL])
// Get interface descriptor word.
// h := e._type.hash
// Use a similar strategy for non-empty interfaces.
ifNil := ir.Nod(ir.OIF, nil, nil)
- ifNil.Left = ir.Nod(ir.OEQ, itab, nodnil())
+ ifNil.SetLeft(ir.Nod(ir.OEQ, itab, nodnil()))
base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
- ifNil.Left = typecheck(ifNil.Left, ctxExpr)
- ifNil.Left = defaultlit(ifNil.Left, nil)
+ ifNil.SetLeft(typecheck(ifNil.Left(), ctxExpr))
+ ifNil.SetLeft(defaultlit(ifNil.Left(), nil))
// ifNil.PtrBody() assigned at end.
- sw.Nbody.Append(ifNil)
+ sw.PtrBody().Append(ifNil)
// Load hash from type or itab.
dotHash := nodSym(ir.ODOTPTR, itab, nil)
- dotHash.Type = types.Types[types.TUINT32]
+ dotHash.SetType(types.Types[types.TUINT32])
dotHash.SetTypecheck(1)
- if s.facename.Type.IsEmptyInterface() {
- dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type
+ if s.facename.Type().IsEmptyInterface() {
+ dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime._type
} else {
- dotHash.Xoffset = int64(2 * Widthptr) // offset of hash in runtime.itab
+ dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime.itab
}
dotHash.SetBounded(true) // guaranteed not to fault
- s.hashname = copyexpr(dotHash, dotHash.Type, &sw.Nbody)
+ s.hashname = copyexpr(dotHash, dotHash.Type(), sw.PtrBody())
br := ir.Nod(ir.OBREAK, nil, nil)
var defaultGoto, nilGoto *ir.Node
var body ir.Nodes
- for _, ncase := range sw.List.Slice() {
+ for _, ncase := range sw.List().Slice() {
var caseVar *ir.Node
- if ncase.Rlist.Len() != 0 {
- caseVar = ncase.Rlist.First()
+ if ncase.Rlist().Len() != 0 {
+ caseVar = ncase.Rlist().First()
}
// For single-type cases with an interface type,
// we initialize the case variable as part of the type assertion.
// In other cases, we initialize it in the body.
var singleType *types.Type
- if ncase.List.Len() == 1 && ncase.List.First().Op == ir.OTYPE {
- singleType = ncase.List.First().Type
+ if ncase.List().Len() == 1 && ncase.List().First().Op() == ir.OTYPE {
+ singleType = ncase.List().First().Type()
}
caseVarInitialized := false
label := autolabel(".s")
- jmp := npos(ncase.Pos, nodSym(ir.OGOTO, nil, label))
+ jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
- if ncase.List.Len() == 0 { // default:
+ if ncase.List().Len() == 0 { // default:
if defaultGoto != nil {
base.Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
- for _, n1 := range ncase.List.Slice() {
+ for _, n1 := range ncase.List().Slice() {
if ir.IsNil(n1) { // case nil:
if nilGoto != nil {
base.Fatalf("duplicate nil case not detected during typechecking")
}
if singleType != nil && singleType.IsInterface() {
- s.Add(ncase.Pos, n1.Type, caseVar, jmp)
+ s.Add(ncase.Pos(), n1.Type(), caseVar, jmp)
caseVarInitialized = true
} else {
- s.Add(ncase.Pos, n1.Type, nil, jmp)
+ s.Add(ncase.Pos(), n1.Type(), nil, jmp)
}
}
- body.Append(npos(ncase.Pos, nodSym(ir.OLABEL, nil, label)))
+ body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
if caseVar != nil && !caseVarInitialized {
val := s.facename
if singleType != nil {
if singleType.IsInterface() {
base.Fatalf("singleType interface should have been handled in Add")
}
- val = ifaceData(ncase.Pos, s.facename, singleType)
+ val = ifaceData(ncase.Pos(), s.facename, singleType)
}
l := []*ir.Node{
- ir.NodAt(ncase.Pos, ir.ODCL, caseVar, nil),
- ir.NodAt(ncase.Pos, ir.OAS, caseVar, val),
+ ir.NodAt(ncase.Pos(), ir.ODCL, caseVar, nil),
+ ir.NodAt(ncase.Pos(), ir.OAS, caseVar, val),
}
typecheckslice(l, ctxStmt)
body.Append(l...)
}
- body.Append(ncase.Nbody.Slice()...)
+ body.Append(ncase.Body().Slice()...)
body.Append(br)
}
- sw.List.Set(nil)
+ sw.PtrList().Set(nil)
if defaultGoto == nil {
defaultGoto = br
if nilGoto == nil {
nilGoto = defaultGoto
}
- ifNil.Nbody.Set1(nilGoto)
+ ifNil.PtrBody().Set1(nilGoto)
- s.Emit(&sw.Nbody)
- sw.Nbody.Append(defaultGoto)
- sw.Nbody.AppendNodes(&body)
+ s.Emit(sw.PtrBody())
+ sw.PtrBody().Append(defaultGoto)
+ sw.PtrBody().AppendNodes(&body)
- walkstmtlist(sw.Nbody.Slice())
+ walkstmtlist(sw.Body().Slice())
}
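
Each non-nil, non-default case that reaches typeSwitch.Add below expands to a guarded type assertion: cv, ok = iface.(T) followed by if ok { goto label }. A hand-written equivalent of that per-case expansion using plain assertions:

    package main

    import "fmt"

    func describe(v interface{}) string {
        // case int: cv, ok = v.(int); if ok { run the case body }
        {
            cv, ok := v.(int)
            if ok {
                return fmt.Sprintf("int %d", cv)
            }
        }
        // case string: the same guarded-assertion shape
        {
            cv, ok := v.(string)
            if ok {
                return fmt.Sprintf("string %q", cv)
            }
        }
        return "default" // no case matched
    }

    func main() {
        fmt.Println(describe(42), describe("hi"), describe(3.5))
    }
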
// A typeSwitch walks a type switch.
// cv, ok = iface.(type)
as := ir.NodAt(pos, ir.OAS2, nil, nil)
- as.List.Set2(caseVar, s.okname) // cv, ok =
+ as.PtrList().Set2(caseVar, s.okname) // cv, ok =
dot := ir.NodAt(pos, ir.ODOTTYPE, s.facename, nil)
- dot.Type = typ // iface.(type)
- as.Rlist.Set1(dot)
+ dot.SetType(typ) // iface.(type)
+ as.PtrRlist().Set1(dot)
as = typecheck(as, ctxStmt)
as = walkexpr(as, &body)
body.Append(as)
// if ok { goto label }
nif := ir.NodAt(pos, ir.OIF, nil, nil)
- nif.Left = s.okname
- nif.Nbody.Set1(jmp)
+ nif.SetLeft(s.okname)
+ nif.PtrBody().Set1(jmp)
body.Append(nif)
if !typ.IsInterface() {
// TODO(mdempsky): Omit hash equality check if
// there's only one type.
c := cc[i]
- nif.Left = ir.Nod(ir.OEQ, s.hashname, nodintconst(int64(c.hash)))
- nif.Nbody.AppendNodes(&c.body)
+ nif.SetLeft(ir.Nod(ir.OEQ, s.hashname, nodintconst(int64(c.hash))))
+ nif.PtrBody().AppendNodes(&c.body)
},
)
}
nif := ir.Nod(ir.OIF, nil, nil)
leaf(i, nif)
base.Pos = base.Pos.WithNotStmt()
- nif.Left = typecheck(nif.Left, ctxExpr)
- nif.Left = defaultlit(nif.Left, nil)
+ nif.SetLeft(typecheck(nif.Left(), ctxExpr))
+ nif.SetLeft(defaultlit(nif.Left(), nil))
out.Append(nif)
- out = &nif.Rlist
+ out = nif.PtrRlist()
}
return
}
half := lo + n/2
nif := ir.Nod(ir.OIF, nil, nil)
- nif.Left = less(half)
+ nif.SetLeft(less(half))
base.Pos = base.Pos.WithNotStmt()
- nif.Left = typecheck(nif.Left, ctxExpr)
- nif.Left = defaultlit(nif.Left, nil)
- do(lo, half, &nif.Nbody)
- do(half, hi, &nif.Rlist)
+ nif.SetLeft(typecheck(nif.Left(), ctxExpr))
+ nif.SetLeft(defaultlit(nif.Left(), nil))
+ do(lo, half, nif.PtrBody())
+ do(half, hi, nif.PtrRlist())
out.Append(nif)
}
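
The recursion above builds a balanced if/else tree over the sorted clauses, splitting at half and descending into the if-body and else-list. The same divide-and-conquer shape as a standalone search over a sorted slice, instead of emitting IR nodes:

    package main

    import "fmt"

    // search mirrors the emitter's structure: guard on less(half), then
    // recurse into the two halves (the compiler recurses into nif.Body
    // and nif.Rlist; here we just keep searching).
    func search(vals []int, target int) int {
        var find func(lo, hi int) int
        find = func(lo, hi int) int {
            if hi-lo <= 1 { // leaf: a single clause to test
                if lo < len(vals) && vals[lo] == target {
                    return lo
                }
                return -1
            }
            half := lo + (hi-lo)/2
            if target < vals[half] { // nif.Left = less(half)
                return find(lo, half) // do(lo, half, nif.Body)
            }
            return find(half, hi) // do(half, hi, nif.Rlist)
        }
        return find(0, len(vals))
    }

    func main() {
        fmt.Println(search([]int{1, 3, 5, 7, 9}, 7)) // 3
    }
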
var pos, op string
var tc uint8
if n != nil {
- pos = base.FmtPos(n.Pos)
- op = n.Op.String()
+ pos = base.FmtPos(n.Pos())
+ op = n.Op().String()
tc = n.Typecheck()
}
var tc uint8
var typ *types.Type
if n != nil {
- pos = base.FmtPos(n.Pos)
- op = n.Op.String()
+ pos = base.FmtPos(n.Pos())
+ op = n.Op().String()
tc = n.Typecheck()
- typ = n.Type
+ typ = n.Type()
}
skipDowidthForTracing = true
// resolve ONONAME to definition, if any.
func resolve(n *ir.Node) (res *ir.Node) {
- if n == nil || n.Op != ir.ONONAME {
+ if n == nil || n.Op() != ir.ONONAME {
return n
}
defer tracePrint("resolve", n)(&res)
}
- if n.Sym.Pkg != ir.LocalPkg {
+ if n.Sym().Pkg != ir.LocalPkg {
if inimport {
base.Fatalf("recursive inimport")
}
return n
}
- r := ir.AsNode(n.Sym.Def)
+ r := ir.AsNode(n.Sym().Def)
if r == nil {
return n
}
- if r.Op == ir.OIOTA {
+ if r.Op() == ir.OIOTA {
if x := getIotaValue(); x >= 0 {
return nodintconst(x)
}
// collect all nodes with same Op
var cycle []*ir.Node
for _, n := range typecheck_tcstack[i:] {
- if n.Op == start.Op {
+ if n.Op() == start.Op() {
cycle = append(cycle, n)
}
}
lno := setlineno(n)
// Skip over parens.
- for n.Op == ir.OPAREN {
- n = n.Left
+ for n.Op() == ir.OPAREN {
+ n = n.Left()
}
// Resolve definition of name and value of iota lazily.
// Skip typecheck if already done.
// But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
if n.Typecheck() == 1 {
- switch n.Op {
+ switch n.Op() {
case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.OPACK:
break
if n.Typecheck() == 2 {
// Typechecking loop. Try printing a meaningful message;
// otherwise, print a stack trace of typechecking.
- switch n.Op {
+ switch n.Op() {
// We can already diagnose variables used as types.
case ir.ONAME:
if top&(ctxExpr|ctxType) == ctxType {
// are substituted.
cycle := cycleFor(n)
for _, n1 := range cycle {
- if n1.Name != nil && !n1.Name.Param.Alias() {
+ if n1.Name() != nil && !n1.Name().Param.Alias() {
// Cycle is ok. But if n is an alias type and doesn't
// have a type yet, we have a recursive type declaration
// with aliases that we can't handle properly yet.
// Report an error rather than crashing later.
- if n.Name != nil && n.Name.Param.Alias() && n.Type == nil {
- base.Pos = n.Pos
+ if n.Name() != nil && n.Name().Param.Alias() && n.Type() == nil {
+ base.Pos = n.Pos()
base.Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
}
base.Pos = lno
return n
}
}
- base.ErrorfAt(n.Pos, "invalid recursive type alias %v%s", n, cycleTrace(cycle))
+ base.ErrorfAt(n.Pos(), "invalid recursive type alias %v%s", n, cycleTrace(cycle))
}
case ir.OLITERAL:
base.Errorf("%v is not a type", n)
break
}
- base.ErrorfAt(n.Pos, "constant definition loop%s", cycleTrace(cycleFor(n)))
+ base.ErrorfAt(n.Pos(), "constant definition loop%s", cycleTrace(cycleFor(n)))
}
if base.Errors() == 0 {
// The result of indexlit MUST be assigned back to n, e.g.
// n.SetLeft(indexlit(n.Left()))
func indexlit(n *ir.Node) *ir.Node {
- if n != nil && n.Type != nil && n.Type.Etype == types.TIDEAL {
+ if n != nil && n.Type() != nil && n.Type().Etype == types.TIDEAL {
return defaultlit(n, types.Types[types.TINT])
}
return n
defer tracePrint("typecheck1", n)(&res)
}
- switch n.Op {
+ switch n.Op() {
case ir.OLITERAL, ir.ONAME, ir.ONONAME, ir.OTYPE:
- if n.Sym == nil {
+ if n.Sym() == nil {
break
}
- if n.Op == ir.ONAME && n.SubOp() != 0 && top&ctxCallee == 0 {
- base.Errorf("use of builtin %v not in function call", n.Sym)
- n.Type = nil
+ if n.Op() == ir.ONAME && n.SubOp() != 0 && top&ctxCallee == 0 {
+ base.Errorf("use of builtin %v not in function call", n.Sym())
+ n.SetType(nil)
return n
}
typecheckdef(n)
- if n.Op == ir.ONONAME {
- n.Type = nil
+ if n.Op() == ir.ONONAME {
+ n.SetType(nil)
return n
}
}
ok := 0
- switch n.Op {
+ switch n.Op() {
// until typecheck is complete, do nothing.
default:
ir.Dump("typecheck", n)
- base.Fatalf("typecheck %v", n.Op)
+ base.Fatalf("typecheck %v", n.Op())
// names
case ir.OLITERAL:
ok |= ctxExpr
- if n.Type == nil && n.Val().Kind() == constant.String {
+ if n.Type() == nil && n.Val().Kind() == constant.String {
base.Fatalf("string literal missing type")
}
ok |= ctxExpr
case ir.ONAME:
- if n.Name.Decldepth == 0 {
- n.Name.Decldepth = decldepth
+ if n.Name().Decldepth == 0 {
+ n.Name().Decldepth = decldepth
}
if n.SubOp() != 0 {
ok |= ctxCallee
// not a write to the variable
if ir.IsBlank(n) {
base.Errorf("cannot use _ as value")
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Name.SetUsed(true)
+ n.Name().SetUsed(true)
}
ok |= ctxExpr
case ir.OPACK:
- base.Errorf("use of package %v without selector", n.Sym)
- n.Type = nil
+ base.Errorf("use of package %v without selector", n.Sym())
+ n.SetType(nil)
return n
case ir.ODDD:
case ir.OTYPE:
ok |= ctxType
- if n.Type == nil {
+ if n.Type() == nil {
return n
}
case ir.OTARRAY:
ok |= ctxType
- r := typecheck(n.Right, ctxType)
- if r.Type == nil {
- n.Type = nil
+ r := typecheck(n.Right(), ctxType)
+ if r.Type() == nil {
+ n.SetType(nil)
return n
}
var t *types.Type
- if n.Left == nil {
- t = types.NewSlice(r.Type)
- } else if n.Left.Op == ir.ODDD {
+ if n.Left() == nil {
+ t = types.NewSlice(r.Type())
+ } else if n.Left().Op() == ir.ODDD {
if !n.Diag() {
n.SetDiag(true)
base.Errorf("use of [...] array outside of array literal")
}
- n.Type = nil
+ n.SetType(nil)
return n
} else {
- n.Left = indexlit(typecheck(n.Left, ctxExpr))
- l := n.Left
+ n.SetLeft(indexlit(typecheck(n.Left(), ctxExpr)))
+ l := n.Left()
if ir.ConstType(l) != constant.Int {
switch {
- case l.Type == nil:
+ case l.Type() == nil:
// Error already reported elsewhere.
- case l.Type.IsInteger() && l.Op != ir.OLITERAL:
+ case l.Type().IsInteger() && l.Op() != ir.OLITERAL:
base.Errorf("non-constant array bound %v", l)
default:
base.Errorf("invalid array bound %v", l)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
v := l.Val()
if doesoverflow(v, types.Types[types.TINT]) {
base.Errorf("array bound is too large")
- n.Type = nil
+ n.SetType(nil)
return n
}
if constant.Sign(v) < 0 {
base.Errorf("array bound must be non-negative")
- n.Type = nil
+ n.SetType(nil)
return n
}
bound, _ := constant.Int64Val(v)
- t = types.NewArray(r.Type, bound)
+ t = types.NewArray(r.Type(), bound)
}
setTypeNode(n, t)
- n.Left = nil
- n.Right = nil
+ n.SetLeft(nil)
+ n.SetRight(nil)
checkwidth(t)
case ir.OTMAP:
ok |= ctxType
- n.Left = typecheck(n.Left, ctxType)
- n.Right = typecheck(n.Right, ctxType)
- l := n.Left
- r := n.Right
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxType))
+ n.SetRight(typecheck(n.Right(), ctxType))
+ l := n.Left()
+ r := n.Right()
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- if l.Type.NotInHeap() {
+ if l.Type().NotInHeap() {
base.Errorf("incomplete (or unallocatable) map key not allowed")
}
- if r.Type.NotInHeap() {
+ if r.Type().NotInHeap() {
base.Errorf("incomplete (or unallocatable) map value not allowed")
}
- setTypeNode(n, types.NewMap(l.Type, r.Type))
+ setTypeNode(n, types.NewMap(l.Type(), r.Type()))
mapqueue = append(mapqueue, n) // check map keys when all types are settled
- n.Left = nil
- n.Right = nil
+ n.SetLeft(nil)
+ n.SetRight(nil)
case ir.OTCHAN:
ok |= ctxType
- n.Left = typecheck(n.Left, ctxType)
- l := n.Left
- if l.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxType))
+ l := n.Left()
+ if l.Type() == nil {
+ n.SetType(nil)
return n
}
- if l.Type.NotInHeap() {
+ if l.Type().NotInHeap() {
base.Errorf("chan of incomplete (or unallocatable) type not allowed")
}
- setTypeNode(n, types.NewChan(l.Type, n.TChanDir()))
- n.Left = nil
+ setTypeNode(n, types.NewChan(l.Type(), n.TChanDir()))
+ n.SetLeft(nil)
n.ResetAux()
case ir.OTSTRUCT:
ok |= ctxType
- setTypeNode(n, tostruct(n.List.Slice()))
- n.List.Set(nil)
+ setTypeNode(n, tostruct(n.List().Slice()))
+ n.PtrList().Set(nil)
case ir.OTINTER:
ok |= ctxType
- setTypeNode(n, tointerface(n.List.Slice()))
+ setTypeNode(n, tointerface(n.List().Slice()))
case ir.OTFUNC:
ok |= ctxType
- setTypeNode(n, functype(n.Left, n.List.Slice(), n.Rlist.Slice()))
- n.Left = nil
- n.List.Set(nil)
- n.Rlist.Set(nil)
+ setTypeNode(n, functype(n.Left(), n.List().Slice(), n.Rlist().Slice()))
+ n.SetLeft(nil)
+ n.PtrList().Set(nil)
+ n.PtrRlist().Set(nil)
// type or expr
case ir.ODEREF:
- n.Left = typecheck(n.Left, ctxExpr|ctxType)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- if l.Op == ir.OTYPE {
+ if l.Op() == ir.OTYPE {
ok |= ctxType
- setTypeNode(n, types.NewPtr(l.Type))
- n.Left = nil
+ setTypeNode(n, types.NewPtr(l.Type()))
+ n.SetLeft(nil)
// Ensure l.Type gets dowidth'd for the backend. Issue 20174.
- checkwidth(l.Type)
+ checkwidth(l.Type())
break
}
if !t.IsPtr() {
if top&(ctxExpr|ctxStmt) != 0 {
- base.Errorf("invalid indirect of %L", n.Left)
- n.Type = nil
+ base.Errorf("invalid indirect of %L", n.Left())
+ n.SetType(nil)
return n
}
}
ok |= ctxExpr
- n.Type = t.Elem()
+ n.SetType(t.Elem())
// arithmetic exprs
case ir.OASOP,
var l *ir.Node
var op ir.Op
var r *ir.Node
- if n.Op == ir.OASOP {
+ if n.Op() == ir.OASOP {
ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxExpr)
- n.Right = typecheck(n.Right, ctxExpr)
- l = n.Left
- r = n.Right
- checkassign(n, n.Left)
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ l = n.Left()
+ r = n.Right()
+ checkassign(n, n.Left())
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- if n.Implicit() && !okforarith[l.Type.Etype] {
- base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type)
- n.Type = nil
+ if n.Implicit() && !okforarith[l.Type().Etype] {
+ base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type())
+ n.SetType(nil)
return n
}
// TODO(marvin): Fix Node.EType type union.
op = n.SubOp()
} else {
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Right = typecheck(n.Right, ctxExpr)
- l = n.Left
- r = n.Right
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ l = n.Left()
+ r = n.Right()
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- op = n.Op
+ op = n.Op()
}
if op == ir.OLSH || op == ir.ORSH {
r = defaultlit(r, types.Types[types.TUINT])
- n.Right = r
- t := r.Type
+ n.SetRight(r)
+ t := r.Type()
if !t.IsInteger() {
- base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type)
- n.Type = nil
+ base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type())
+ n.SetType(nil)
return n
}
if t.IsSigned() && !langSupported(1, 13, curpkg()) {
- base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type)
- n.Type = nil
+ base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type())
+ n.SetType(nil)
return n
}
- t = l.Type
+ t = l.Type()
if t != nil && t.Etype != types.TIDEAL && !t.IsInteger() {
base.Errorf("invalid operation: %v (shift of type %v)", n, t)
- n.Type = nil
+ n.SetType(nil)
return n
}
// no defaultlit for left
// the outer context gives the type
- n.Type = l.Type
- if (l.Type == types.UntypedFloat || l.Type == types.UntypedComplex) && r.Op == ir.OLITERAL {
- n.Type = types.UntypedInt
+ n.SetType(l.Type())
+ if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL {
+ n.SetType(types.UntypedInt)
}
break
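
The untyped-shift special case above, where a shift of an untyped float or complex constant by a literal count takes type untyped int, is what makes constant expressions like 1.0 << 3 legal. A small runnable illustration:

    package main

    import "fmt"

    func main() {
        // An untyped float constant shifted by a constant count is
        // treated as an untyped integer, so these compile; 1.5 << 3
        // would not, since 1.5 is not representable as an integer.
        const c = 1.0 << 3 // c == 8, default type int
        var x = 2.0 << 1   // also a constant shift: x == 4, an int variable
        fmt.Println(c, x)
    }
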
// For "x == x && len(s)", it's better to report that "len(s)" (type int)
// can't be used with "&&" than to report that "x == x" (type untyped bool)
// can't be converted to int (see issue #41500).
- if n.Op == ir.OANDAND || n.Op == ir.OOROR {
- if !n.Left.Type.IsBoolean() {
- base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Left.Type))
- n.Type = nil
+ if n.Op() == ir.OANDAND || n.Op() == ir.OOROR {
+ if !n.Left().Type().IsBoolean() {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Left().Type()))
+ n.SetType(nil)
return n
}
- if !n.Right.Type.IsBoolean() {
- base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Right.Type))
- n.Type = nil
+ if !n.Right().Type().IsBoolean() {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Right().Type()))
+ n.SetType(nil)
return n
}
}
// ideal mixed with non-ideal
l, r = defaultlit2(l, r, false)
- n.Left = l
- n.Right = r
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ n.SetLeft(l)
+ n.SetRight(r)
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- t := l.Type
+ t := l.Type()
if t.Etype == types.TIDEAL {
- t = r.Type
+ t = r.Type()
}
et := t.Etype
if et == types.TIDEAL {
et = types.TINT
}
aop := ir.OXXX
- if iscmp[n.Op] && t.Etype != types.TIDEAL && !types.Identical(l.Type, r.Type) {
+ if iscmp[n.Op()] && t.Etype != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
// comparison is okay as long as one side is
// assignable to the other. convert so they have
// the same type.
// in that case, check comparability of the concrete type.
// The conversion allocates, so only do it if the concrete type is huge.
converted := false
- if r.Type.Etype != types.TBLANK {
- aop, _ = assignop(l.Type, r.Type)
+ if r.Type().Etype != types.TBLANK {
+ aop, _ = assignop(l.Type(), r.Type())
if aop != ir.OXXX {
- if r.Type.IsInterface() && !l.Type.IsInterface() && !IsComparable(l.Type) {
- base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type))
- n.Type = nil
+ if r.Type().IsInterface() && !l.Type().IsInterface() && !IsComparable(l.Type()) {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type()))
+ n.SetType(nil)
return n
}
- dowidth(l.Type)
- if r.Type.IsInterface() == l.Type.IsInterface() || l.Type.Width >= 1<<16 {
+ dowidth(l.Type())
+ if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Width >= 1<<16 {
l = ir.Nod(aop, l, nil)
- l.Type = r.Type
+ l.SetType(r.Type())
l.SetTypecheck(1)
- n.Left = l
+ n.SetLeft(l)
}
- t = r.Type
+ t = r.Type()
converted = true
}
}
- if !converted && l.Type.Etype != types.TBLANK {
- aop, _ = assignop(r.Type, l.Type)
+ if !converted && l.Type().Etype != types.TBLANK {
+ aop, _ = assignop(r.Type(), l.Type())
if aop != ir.OXXX {
- if l.Type.IsInterface() && !r.Type.IsInterface() && !IsComparable(r.Type) {
- base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type))
- n.Type = nil
+ if l.Type().IsInterface() && !r.Type().IsInterface() && !IsComparable(r.Type()) {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type()))
+ n.SetType(nil)
return n
}
- dowidth(r.Type)
- if r.Type.IsInterface() == l.Type.IsInterface() || r.Type.Width >= 1<<16 {
+ dowidth(r.Type())
+ if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Width >= 1<<16 {
r = ir.Nod(aop, r, nil)
- r.Type = l.Type
+ r.SetType(l.Type())
r.SetTypecheck(1)
- n.Right = r
+ n.SetRight(r)
}
- t = l.Type
+ t = l.Type()
}
}
et = t.Etype
}
- if t.Etype != types.TIDEAL && !types.Identical(l.Type, r.Type) {
+ if t.Etype != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
l, r = defaultlit2(l, r, true)
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- if l.Type.IsInterface() == r.Type.IsInterface() || aop == 0 {
- base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
- n.Type = nil
+ if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ n.SetType(nil)
return n
}
}
if t.Etype == types.TIDEAL {
- t = mixUntyped(l.Type, r.Type)
+ t = mixUntyped(l.Type(), r.Type())
}
if dt := defaultType(t); !okfor[op][dt.Etype] {
base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
- n.Type = nil
+ n.SetType(nil)
return n
}
// okfor allows any array == array, map == map, func == func.
// restrict to slice/map/func == nil and nil == slice/map/func.
- if l.Type.IsArray() && !IsComparable(l.Type) {
- base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type)
- n.Type = nil
+ if l.Type().IsArray() && !IsComparable(l.Type()) {
+ base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type())
+ n.SetType(nil)
return n
}
- if l.Type.IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) {
+ if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) {
base.Errorf("invalid operation: %v (slice can only be compared to nil)", n)
- n.Type = nil
+ n.SetType(nil)
return n
}
- if l.Type.IsMap() && !ir.IsNil(l) && !ir.IsNil(r) {
+ if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) {
base.Errorf("invalid operation: %v (map can only be compared to nil)", n)
- n.Type = nil
+ n.SetType(nil)
return n
}
- if l.Type.Etype == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) {
+ if l.Type().Etype == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) {
base.Errorf("invalid operation: %v (func can only be compared to nil)", n)
- n.Type = nil
+ n.SetType(nil)
return n
}
- if l.Type.IsStruct() {
- if f := IncomparableField(l.Type); f != nil {
+ if l.Type().IsStruct() {
+ if f := IncomparableField(l.Type()); f != nil {
base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
- n.Type = nil
+ n.SetType(nil)
return n
}
}
- if iscmp[n.Op] {
+ if iscmp[n.Op()] {
t = types.UntypedBool
- n.Type = t
+ n.SetType(t)
n = evalConst(n)
- if n.Op != ir.OLITERAL {
+ if n.Op() != ir.OLITERAL {
l, r = defaultlit2(l, r, true)
- n.Left = l
- n.Right = r
+ n.SetLeft(l)
+ n.SetRight(r)
}
}
- if et == types.TSTRING && n.Op == ir.OADD {
+ if et == types.TSTRING && n.Op() == ir.OADD {
// create or update OADDSTR node with list of strings in x + y + z + (w + v) + ...
- if l.Op == ir.OADDSTR {
+ if l.Op() == ir.OADDSTR {
orig := n
n = l
- n.Pos = orig.Pos
+ n.SetPos(orig.Pos())
} else {
- n = ir.NodAt(n.Pos, ir.OADDSTR, nil, nil)
- n.List.Set1(l)
+ n = ir.NodAt(n.Pos(), ir.OADDSTR, nil, nil)
+ n.PtrList().Set1(l)
}
- if r.Op == ir.OADDSTR {
- n.List.AppendNodes(&r.List)
+ if r.Op() == ir.OADDSTR {
+ n.PtrList().AppendNodes(r.PtrList())
} else {
- n.List.Append(r)
+ n.PtrList().Append(r)
}
}
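
The branches above flatten nested string additions into a single OADDSTR operand list, so x + y + z lowers to one concatenation instead of a chain of pairwise ones. The same flattening over a toy expression tree, as a standalone sketch:

    package main

    import (
        "fmt"
        "strings"
    )

    type expr interface{}

    type lit string

    type add struct{ l, r expr }

    // flatten appends the leaves of a +-tree in order, mirroring how the
    // typechecker appends r (or all of r's operands) onto an existing
    // OADDSTR list instead of nesting another addition.
    func flatten(e expr, out []string) []string {
        switch e := e.(type) {
        case lit:
            return append(out, string(e))
        case add:
            out = flatten(e.l, out)
            return flatten(e.r, out)
        }
        return out
    }

    func main() {
        e := add{add{lit("x"), lit("y")}, lit("z")}    // (x + y) + z
        fmt.Println(strings.Join(flatten(e, nil), "")) // one concatenation: "xyz"
    }
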
if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) {
if constant.Sign(r.Val()) == 0 {
base.Errorf("division by zero")
- n.Type = nil
+ n.SetType(nil)
return n
}
}
- n.Type = t
+ n.SetType(t)
case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- if !okfor[n.Op][defaultType(t).Etype] {
- base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(t))
- n.Type = nil
+ if !okfor[n.Op()][defaultType(t).Etype] {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t))
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
// exprs
case ir.OADDR:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- if n.Left.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ if n.Left().Type() == nil {
+ n.SetType(nil)
return n
}
- switch n.Left.Op {
+ switch n.Left().Op() {
case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
- n.Op = ir.OPTRLIT
+ n.SetOp(ir.OPTRLIT)
default:
- checklvalue(n.Left, "take the address of")
- r := outervalue(n.Left)
- if r.Op == ir.ONAME {
- if r.Orig != r {
+ checklvalue(n.Left(), "take the address of")
+ r := outervalue(n.Left())
+ if r.Op() == ir.ONAME {
+ if r.Orig() != r {
base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
}
- r.Name.SetAddrtaken(true)
- if r.Name.IsClosureVar() && !capturevarscomplete {
+ r.Name().SetAddrtaken(true)
+ if r.Name().IsClosureVar() && !capturevarscomplete {
// Mark the original variable as Addrtaken so that capturevars
// knows not to pass it by value.
// But if the capturevars phase is complete, don't touch it,
// in case l.Name's containing function has not yet been compiled.
- r.Name.Defn.Name.SetAddrtaken(true)
+ r.Name().Defn.Name().SetAddrtaken(true)
}
}
- n.Left = defaultlit(n.Left, nil)
- if n.Left.Type == nil {
- n.Type = nil
+ n.SetLeft(defaultlit(n.Left(), nil))
+ if n.Left().Type() == nil {
+ n.SetType(nil)
return n
}
}
- n.Type = types.NewPtr(n.Left.Type)
+ n.SetType(types.NewPtr(n.Left().Type()))
case ir.OCOMPLIT:
ok |= ctxExpr
n = typecheckcomplit(n)
- if n.Type == nil {
+ if n.Type() == nil {
return n
}
case ir.OXDOT, ir.ODOT:
- if n.Op == ir.OXDOT {
+ if n.Op() == ir.OXDOT {
n = adddot(n)
- n.Op = ir.ODOT
- if n.Left == nil {
- n.Type = nil
+ n.SetOp(ir.ODOT)
+ if n.Left() == nil {
+ n.SetType(nil)
return n
}
}
- n.Left = typecheck(n.Left, ctxExpr|ctxType)
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType))
- n.Left = defaultlit(n.Left, nil)
+ n.SetLeft(defaultlit(n.Left(), nil))
- t := n.Left.Type
+ t := n.Left().Type()
if t == nil {
- base.UpdateErrorDot(ir.Line(n), n.Left.String(), n.String())
- n.Type = nil
+ base.UpdateErrorDot(ir.Line(n), n.Left().String(), n.String())
+ n.SetType(nil)
return n
}
- s := n.Sym
+ s := n.Sym()
- if n.Left.Op == ir.OTYPE {
+ if n.Left().Op() == ir.OTYPE {
n = typecheckMethodExpr(n)
- if n.Type == nil {
+ if n.Type() == nil {
return n
}
ok = ctxExpr
if t.IsPtr() && !t.Elem().IsInterface() {
t = t.Elem()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Op = ir.ODOTPTR
+ n.SetOp(ir.ODOTPTR)
checkwidth(t)
}
- if n.Sym.IsBlank() {
+ if n.Sym().IsBlank() {
base.Errorf("cannot refer to blank field or method")
- n.Type = nil
+ n.SetType(nil)
return n
}
// Legitimate field or method lookup failed, try to explain the error
switch {
case t.IsEmptyInterface():
- base.Errorf("%v undefined (type %v is interface with no methods)", n, n.Left.Type)
+ base.Errorf("%v undefined (type %v is interface with no methods)", n, n.Left().Type())
case t.IsPtr() && t.Elem().IsInterface():
// Pointer to interface is almost always a mistake.
- base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.Left.Type)
+ base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.Left().Type())
case lookdot(n, t, 1) != nil:
// Field or method matches by name, but it is not exported.
- base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym)
+ base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym())
default:
if mt := lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup.
- base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left.Type, n.Sym, mt.Sym)
+ base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left().Type(), n.Sym(), mt.Sym)
} else {
- base.Errorf("%v undefined (type %v has no field or method %v)", n, n.Left.Type, n.Sym)
+ base.Errorf("%v undefined (type %v has no field or method %v)", n, n.Left().Type(), n.Sym())
}
}
- n.Type = nil
+ n.SetType(nil)
return n
}
- switch n.Op {
+ switch n.Op() {
case ir.ODOTINTER, ir.ODOTMETH:
if top&ctxCallee != 0 {
ok |= ctxCallee
case ir.ODOTTYPE:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsInterface() {
base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t)
- n.Type = nil
+ n.SetType(nil)
return n
}
- if n.Right != nil {
- n.Right = typecheck(n.Right, ctxType)
- n.Type = n.Right.Type
- n.Right = nil
- if n.Type == nil {
+ if n.Right() != nil {
+ n.SetRight(typecheck(n.Right(), ctxType))
+ n.SetType(n.Right().Type())
+ n.SetRight(nil)
+ if n.Type() == nil {
return n
}
}
- if n.Type != nil && !n.Type.IsInterface() {
+ if n.Type() != nil && !n.Type().IsInterface() {
var missing, have *types.Field
var ptr int
- if !implements(n.Type, t, &missing, &have, &ptr) {
+ if !implements(n.Type(), t, &missing, &have, &ptr) {
if have != nil && have.Sym == missing.Sym {
base.Errorf("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+
- "\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ "\t\thave %v%0S\n\t\twant %v%0S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else if ptr != 0 {
- base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type, t, missing.Sym)
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type(), t, missing.Sym)
} else if have != nil {
base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+
- "\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ "\t\thave %v%0S\n\t\twant %v%0S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else {
- base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type, t, missing.Sym)
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type(), t, missing.Sym)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
}
case ir.OINDEX:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- n.Left = implicitstar(n.Left)
- l := n.Left
- n.Right = typecheck(n.Right, ctxExpr)
- r := n.Right
- t := l.Type
- if t == nil || r.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ n.SetLeft(implicitstar(n.Left()))
+ l := n.Left()
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ r := n.Right()
+ t := l.Type()
+ if t == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
switch t.Etype {
default:
base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t)
- n.Type = nil
+ n.SetType(nil)
return n
case types.TSTRING, types.TARRAY, types.TSLICE:
- n.Right = indexlit(n.Right)
+ n.SetRight(indexlit(n.Right()))
if t.IsString() {
- n.Type = types.Bytetype
+ n.SetType(types.Bytetype)
} else {
- n.Type = t.Elem()
+ n.SetType(t.Elem())
}
why := "string"
if t.IsArray() {
why = "slice"
}
- if n.Right.Type != nil && !n.Right.Type.IsInteger() {
- base.Errorf("non-integer %s index %v", why, n.Right)
+ if n.Right().Type() != nil && !n.Right().Type().IsInteger() {
+ base.Errorf("non-integer %s index %v", why, n.Right())
break
}
- if !n.Bounded() && ir.IsConst(n.Right, constant.Int) {
- x := n.Right.Val()
+ if !n.Bounded() && ir.IsConst(n.Right(), constant.Int) {
+ x := n.Right().Val()
if constant.Sign(x) < 0 {
- base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Right)
+ base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Right())
} else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) {
- base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem())
- } else if ir.IsConst(n.Left, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(n.Left.StringVal())))) {
- base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.StringVal()))
+ base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Right(), t.NumElem())
+ } else if ir.IsConst(n.Left(), constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(n.Left().StringVal())))) {
+ base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Right(), len(n.Left().StringVal()))
} else if doesoverflow(x, types.Types[types.TINT]) {
- base.Errorf("invalid %s index %v (index too large)", why, n.Right)
+ base.Errorf("invalid %s index %v (index too large)", why, n.Right())
}
}
case types.TMAP:
- n.Right = assignconv(n.Right, t.Key(), "map index")
- n.Type = t.Elem()
- n.Op = ir.OINDEXMAP
+ n.SetRight(assignconv(n.Right(), t.Key(), "map index"))
+ n.SetType(t.Elem())
+ n.SetOp(ir.OINDEXMAP)
n.ResetAux()
}
case ir.ORECV:
ok |= ctxStmt | ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsChan() {
base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t)
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.ChanDir().CanRecv() {
base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t)
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Type = t.Elem()
+ n.SetType(t.Elem())
case ir.OSEND:
ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxExpr)
- n.Right = typecheck(n.Right, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- t := n.Left.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ t := n.Left().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsChan() {
base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t)
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.ChanDir().CanSend() {
base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t)
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Right = assignconv(n.Right, t.Elem(), "send")
- if n.Right.Type == nil {
- n.Type = nil
+ n.SetRight(assignconv(n.Right(), t.Elem(), "send"))
+ if n.Right().Type() == nil {
+ n.SetType(nil)
return n
}
- n.Type = nil
+ n.SetType(nil)
case ir.OSLICEHEADER:
// Errors here are Fatalf instead of Errorf because only the compiler
// can construct an OSLICEHEADER node.
// Components used in OSLICEHEADER that are supposed to be constant
// have already been typechecked in e.g. OMAKESLICE earlier.
ok |= ctxExpr
- t := n.Type
+ t := n.Type()
if t == nil {
base.Fatalf("no type specified for OSLICEHEADER")
}
if !t.IsSlice() {
- base.Fatalf("invalid type %v for OSLICEHEADER", n.Type)
+ base.Fatalf("invalid type %v for OSLICEHEADER", n.Type())
}
- if n.Left == nil || n.Left.Type == nil || !n.Left.Type.IsUnsafePtr() {
+ if n.Left() == nil || n.Left().Type() == nil || !n.Left().Type().IsUnsafePtr() {
base.Fatalf("need unsafe.Pointer for OSLICEHEADER")
}
- if x := n.List.Len(); x != 2 {
+ if x := n.List().Len(); x != 2 {
base.Fatalf("expected 2 params (len, cap) for OSLICEHEADER, got %d", x)
}
- n.Left = typecheck(n.Left, ctxExpr)
- l := typecheck(n.List.First(), ctxExpr)
- c := typecheck(n.List.Second(), ctxExpr)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ l := typecheck(n.List().First(), ctxExpr)
+ c := typecheck(n.List().Second(), ctxExpr)
l = defaultlit(l, types.Types[types.TINT])
c = defaultlit(c, types.Types[types.TINT])
base.Fatalf("len larger than cap for OSLICEHEADER")
}
- n.List.SetFirst(l)
- n.List.SetSecond(c)
+ n.List().SetFirst(l)
+ n.List().SetSecond(c)
case ir.OMAKESLICECOPY:
// Errors here are Fatalf instead of Errorf because only the compiler
// can construct an OMAKESLICECOPY node.
// Components used in OMAKESLICECOPY that are supposed to be constant
// have already been typechecked in OMAKE and OCOPY earlier.
ok |= ctxExpr
- t := n.Type
+ t := n.Type()
if t == nil {
base.Fatalf("no type specified for OMAKESLICECOPY")
}
if !t.IsSlice() {
- base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type)
+ base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type())
}
- if n.Left == nil {
+ if n.Left() == nil {
base.Fatalf("missing len argument for OMAKESLICECOPY")
}
- if n.Right == nil {
+ if n.Right() == nil {
base.Fatalf("missing slice argument to copy for OMAKESLICECOPY")
}
- n.Left = typecheck(n.Left, ctxExpr)
- n.Right = typecheck(n.Right, ctxExpr)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
- n.Left = defaultlit(n.Left, types.Types[types.TINT])
+ n.SetLeft(defaultlit(n.Left(), types.Types[types.TINT]))
- if !n.Left.Type.IsInteger() && n.Type.Etype != types.TIDEAL {
+ if !n.Left().Type().IsInteger() && n.Type().Etype != types.TIDEAL {
base.Errorf("non-integer len argument in OMAKESLICECOPY")
}
- if ir.IsConst(n.Left, constant.Int) {
- if doesoverflow(n.Left.Val(), types.Types[types.TINT]) {
+ if ir.IsConst(n.Left(), constant.Int) {
+ if doesoverflow(n.Left().Val(), types.Types[types.TINT]) {
base.Fatalf("len for OMAKESLICECOPY too large")
}
- if constant.Sign(n.Left.Val()) < 0 {
+ if constant.Sign(n.Left().Val()) < 0 {
base.Fatalf("len for OMAKESLICECOPY must be non-negative")
}
}
case ir.OSLICE, ir.OSLICE3:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
low, high, max := n.SliceBounds()
- hasmax := n.Op.IsSlice3()
+ hasmax := n.Op().IsSlice3()
low = typecheck(low, ctxExpr)
high = typecheck(high, ctxExpr)
max = typecheck(max, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
+ n.SetLeft(defaultlit(n.Left(), nil))
low = indexlit(low)
high = indexlit(high)
max = indexlit(max)
n.SetSliceBounds(low, high, max)
- l := n.Left
- if l.Type == nil {
- n.Type = nil
+ l := n.Left()
+ if l.Type() == nil {
+ n.SetType(nil)
return n
}
- if l.Type.IsArray() {
- if !islvalue(n.Left) {
+ if l.Type().IsArray() {
+ if !islvalue(n.Left()) {
base.Errorf("invalid operation %v (slice of unaddressable value)", n)
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Left = ir.Nod(ir.OADDR, n.Left, nil)
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxExpr)
- l = n.Left
+ n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil))
+ n.Left().SetImplicit(true)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ l = n.Left()
}
- t := l.Type
+ t := l.Type()
var tp *types.Type
if t.IsString() {
if hasmax {
base.Errorf("invalid operation %v (3-index slice of string)", n)
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Type = t
- n.Op = ir.OSLICESTR
+ n.SetType(t)
+ n.SetOp(ir.OSLICESTR)
} else if t.IsPtr() && t.Elem().IsArray() {
tp = t.Elem()
- n.Type = types.NewSlice(tp.Elem())
- dowidth(n.Type)
+ n.SetType(types.NewSlice(tp.Elem()))
+ dowidth(n.Type())
if hasmax {
- n.Op = ir.OSLICE3ARR
+ n.SetOp(ir.OSLICE3ARR)
} else {
- n.Op = ir.OSLICEARR
+ n.SetOp(ir.OSLICEARR)
}
} else if t.IsSlice() {
- n.Type = t
+ n.SetType(t)
} else {
base.Errorf("cannot slice %v (type %v)", l, t)
- n.Type = nil
+ n.SetType(nil)
return n
}
if low != nil && !checksliceindex(l, low, tp) {
- n.Type = nil
+ n.SetType(nil)
return n
}
if high != nil && !checksliceindex(l, high, tp) {
- n.Type = nil
+ n.SetType(nil)
return n
}
if max != nil && !checksliceindex(l, max, tp) {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) {
- n.Type = nil
+ n.SetType(nil)
return n
}
// call and call like
case ir.OCALL:
- typecheckslice(n.Ninit.Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907)
- n.Left = typecheck(n.Left, ctxExpr|ctxType|ctxCallee)
- if n.Left.Diag() {
+ typecheckslice(n.Init().Slice(), ctxStmt) // imported rewritten f(g()) calls (#30907)
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxType|ctxCallee))
+ if n.Left().Diag() {
n.SetDiag(true)
}
- l := n.Left
+ l := n.Left()
- if l.Op == ir.ONAME && l.SubOp() != 0 {
+ if l.Op() == ir.ONAME && l.SubOp() != 0 {
if n.IsDDD() && l.SubOp() != ir.OAPPEND {
base.Errorf("invalid use of ... with builtin %v", l)
}
// builtin: OLEN, OCAP, etc.
- n.Op = l.SubOp()
- n.Left = n.Right
- n.Right = nil
+ n.SetOp(l.SubOp())
+ n.SetLeft(n.Right())
+ n.SetRight(nil)
n = typecheck1(n, top)
return n
}
- n.Left = defaultlit(n.Left, nil)
- l = n.Left
- if l.Op == ir.OTYPE {
+ n.SetLeft(defaultlit(n.Left(), nil))
+ l = n.Left()
+ if l.Op() == ir.OTYPE {
if n.IsDDD() {
- if !l.Type.Broke() {
- base.Errorf("invalid use of ... in type conversion to %v", l.Type)
+ if !l.Type().Broke() {
+ base.Errorf("invalid use of ... in type conversion to %v", l.Type())
}
n.SetDiag(true)
}
ok |= ctxExpr
// turn CALL(type, arg) into CONV(arg) w/ type
- n.Left = nil
+ n.SetLeft(nil)
- n.Op = ir.OCONV
- n.Type = l.Type
- if !onearg(n, "conversion to %v", l.Type) {
- n.Type = nil
+ n.SetOp(ir.OCONV)
+ n.SetType(l.Type())
+ if !onearg(n, "conversion to %v", l.Type()) {
+ n.SetType(nil)
return n
}
n = typecheck1(n, top)
}
typecheckargs(n)
- t := l.Type
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
checkwidth(t)
- switch l.Op {
+ switch l.Op() {
case ir.ODOTINTER:
- n.Op = ir.OCALLINTER
+ n.SetOp(ir.OCALLINTER)
case ir.ODOTMETH:
- n.Op = ir.OCALLMETH
+ n.SetOp(ir.OCALLMETH)
// typecheckaste was used here but there wasn't enough
// information further down the call chain to know if we
// were testing a method receiver for unexported fields.
// It isn't necessary, so just do a sanity check.
tp := t.Recv().Type
- if l.Left == nil || !types.Identical(l.Left.Type, tp) {
+ if l.Left() == nil || !types.Identical(l.Left().Type(), tp) {
base.Fatalf("method receiver")
}
default:
- n.Op = ir.OCALLFUNC
+ n.SetOp(ir.OCALLFUNC)
if t.Etype != types.TFUNC {
name := l.String()
- if isBuiltinFuncName(name) && l.Name.Defn != nil {
+ if isBuiltinFuncName(name) && l.Name().Defn != nil {
// be more specific when the function
// name matches a predeclared function
base.Errorf("cannot call non-function %s (type %v), declared at %s",
- name, t, base.FmtPos(l.Name.Defn.Pos))
+ name, t, base.FmtPos(l.Name().Defn.Pos()))
} else {
base.Errorf("cannot call non-function %s (type %v)", name, t)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
}
- typecheckaste(ir.OCALL, n.Left, n.IsDDD(), t.Params(), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) })
+ typecheckaste(ir.OCALL, n.Left(), n.IsDDD(), t.Params(), n.List(), func() string { return fmt.Sprintf("argument to %v", n.Left()) })
ok |= ctxStmt
if t.NumResults() == 0 {
break
}
ok |= ctxExpr
if t.NumResults() == 1 {
- n.Type = l.Type.Results().Field(0).Type
+ n.SetType(l.Type().Results().Field(0).Type)
- if n.Op == ir.OCALLFUNC && n.Left.Op == ir.ONAME && isRuntimePkg(n.Left.Sym.Pkg) && n.Left.Sym.Name == "getg" {
+ if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.ONAME && isRuntimePkg(n.Left().Sym().Pkg) && n.Left().Sym().Name == "getg" {
// Emit code for runtime.getg() directly instead of calling function.
// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
// so that the ordering pass can make sure to preserve the semantics of the original code
// (in particular, the exact time of the function call) by introducing temporaries.
// In this case, we know getg() always returns the same result within a given function
// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
- n.Op = ir.OGETG
+ n.SetOp(ir.OGETG)
}
break
break
}
- n.Type = l.Type.Results()
+ n.SetType(l.Type().Results())
case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
ok |= ctxExpr
- if !onearg(n, "%v", n.Op) {
- n.Type = nil
+ if !onearg(n, "%v", n.Op()) {
+ n.SetType(nil)
return n
}
- n.Type = types.Types[types.TUINTPTR]
+ n.SetType(types.Types[types.TUINTPTR])
case ir.OCAP, ir.OLEN:
ok |= ctxExpr
- if !onearg(n, "%v", n.Op) {
- n.Type = nil
+ if !onearg(n, "%v", n.Op()) {
+ n.SetType(nil)
return n
}
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- n.Left = implicitstar(n.Left)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ n.SetLeft(implicitstar(n.Left()))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
var ok bool
- if n.Op == ir.OLEN {
+ if n.Op() == ir.OLEN {
ok = okforlen[t.Etype]
} else {
ok = okforcap[t.Etype]
}
if !ok {
- base.Errorf("invalid argument %L for %v", l, n.Op)
- n.Type = nil
+ base.Errorf("invalid argument %L for %v", l, n.Op())
+ n.SetType(nil)
return n
}
- n.Type = types.Types[types.TINT]
+ n.SetType(types.Types[types.TINT])
case ir.OREAL, ir.OIMAG:
ok |= ctxExpr
- if !onearg(n, "%v", n.Op) {
- n.Type = nil
+ if !onearg(n, "%v", n.Op()) {
+ n.SetType(nil)
return n
}
- n.Left = typecheck(n.Left, ctxExpr)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
// Determine result type.
switch t.Etype {
case types.TIDEAL:
- n.Type = types.UntypedFloat
+ n.SetType(types.UntypedFloat)
case types.TCOMPLEX64:
- n.Type = types.Types[types.TFLOAT32]
+ n.SetType(types.Types[types.TFLOAT32])
case types.TCOMPLEX128:
- n.Type = types.Types[types.TFLOAT64]
+ n.SetType(types.Types[types.TFLOAT64])
default:
- base.Errorf("invalid argument %L for %v", l, n.Op)
- n.Type = nil
+ base.Errorf("invalid argument %L for %v", l, n.Op())
+ n.SetType(nil)
return n
}
ok |= ctxExpr
typecheckargs(n)
if !twoarg(n) {
- n.Type = nil
+ n.SetType(nil)
return n
}
- l := n.Left
- r := n.Right
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ l := n.Left()
+ r := n.Right()
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
l, r = defaultlit2(l, r, false)
- if l.Type == nil || r.Type == nil {
- n.Type = nil
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
return n
}
- n.Left = l
- n.Right = r
+ n.SetLeft(l)
+ n.SetRight(r)
- if !types.Identical(l.Type, r.Type) {
- base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
- n.Type = nil
+ if !types.Identical(l.Type(), r.Type()) {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ n.SetType(nil)
return n
}
var t *types.Type
- switch l.Type.Etype {
+ switch l.Type().Etype {
default:
- base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type)
- n.Type = nil
+ base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type())
+ n.SetType(nil)
return n
case types.TIDEAL:
case types.TFLOAT64:
t = types.Types[types.TCOMPLEX128]
}
- n.Type = t
+ n.SetType(t)
case ir.OCLOSE:
- if !onearg(n, "%v", n.Op) {
- n.Type = nil
+ if !onearg(n, "%v", n.Op()) {
+ n.SetType(nil)
return n
}
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- l := n.Left
- t := l.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ l := n.Left()
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsChan() {
base.Errorf("invalid operation: %v (non-chan type %v)", n, t)
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.ChanDir().CanSend() {
base.Errorf("invalid operation: %v (cannot close receive-only channel)", n)
- n.Type = nil
+ n.SetType(nil)
return n
}
case ir.ODELETE:
ok |= ctxStmt
typecheckargs(n)
- args := n.List
+ args := n.List()
if args.Len() == 0 {
base.Errorf("missing arguments to delete")
- n.Type = nil
+ n.SetType(nil)
return n
}
if args.Len() == 1 {
base.Errorf("missing second (key) argument to delete")
- n.Type = nil
+ n.SetType(nil)
return n
}
if args.Len() != 2 {
base.Errorf("too many arguments to delete")
- n.Type = nil
+ n.SetType(nil)
return n
}
l := args.First()
r := args.Second()
- if l.Type != nil && !l.Type.IsMap() {
- base.Errorf("first argument to delete must be map; have %L", l.Type)
- n.Type = nil
+ if l.Type() != nil && !l.Type().IsMap() {
+ base.Errorf("first argument to delete must be map; have %L", l.Type())
+ n.SetType(nil)
return n
}
- args.SetSecond(assignconv(r, l.Type.Key(), "delete"))
+ args.SetSecond(assignconv(r, l.Type().Key(), "delete"))
case ir.OAPPEND:
ok |= ctxExpr
typecheckargs(n)
- args := n.List
+ args := n.List()
if args.Len() == 0 {
base.Errorf("missing arguments to append")
- n.Type = nil
+ n.SetType(nil)
return n
}
- t := args.First().Type
+ t := args.First().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
if !t.IsSlice() {
if ir.IsNil(args.First()) {
base.Errorf("first argument to append must be typed slice; have untyped nil")
- n.Type = nil
+ n.SetType(nil)
return n
}
base.Errorf("first argument to append must be slice; have %L", t)
- n.Type = nil
+ n.SetType(nil)
return n
}
if n.IsDDD() {
if args.Len() == 1 {
base.Errorf("cannot use ... on first argument to append")
- n.Type = nil
+ n.SetType(nil)
return n
}
if args.Len() != 2 {
base.Errorf("too many arguments to append")
- n.Type = nil
+ n.SetType(nil)
return n
}
- if t.Elem().IsKind(types.TUINT8) && args.Second().Type.IsString() {
+ if t.Elem().IsKind(types.TUINT8) && args.Second().Type().IsString() {
args.SetSecond(defaultlit(args.Second(), types.Types[types.TSTRING]))
break
}
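// Illustrative sketch: this is the special case that lets
//   b = append(b, "tail"...)
// pass a string directly to a []byte append instead of converting it
// element by element.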
as := args.Slice()[1:]
for i, n := range as {
- if n.Type == nil {
+ if n.Type() == nil {
continue
}
as[i] = assignconv(n, t.Elem(), "append")
- checkwidth(as[i].Type) // ensure width is calculated for backend
+ checkwidth(as[i].Type()) // ensure width is calculated for backend
}
case ir.OCOPY:
ok |= ctxStmt | ctxExpr
typecheckargs(n)
if !twoarg(n) {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Type = types.Types[types.TINT]
- if n.Left.Type == nil || n.Right.Type == nil {
- n.Type = nil
+ n.SetType(types.Types[types.TINT])
+ if n.Left().Type() == nil || n.Right().Type() == nil {
+ n.SetType(nil)
return n
}
- n.Left = defaultlit(n.Left, nil)
- n.Right = defaultlit(n.Right, nil)
- if n.Left.Type == nil || n.Right.Type == nil {
- n.Type = nil
+ n.SetLeft(defaultlit(n.Left(), nil))
+ n.SetRight(defaultlit(n.Right(), nil))
+ if n.Left().Type() == nil || n.Right().Type() == nil {
+ n.SetType(nil)
return n
}
// copy([]byte, string)
- if n.Left.Type.IsSlice() && n.Right.Type.IsString() {
- if types.Identical(n.Left.Type.Elem(), types.Bytetype) {
+ if n.Left().Type().IsSlice() && n.Right().Type().IsString() {
+ if types.Identical(n.Left().Type().Elem(), types.Bytetype) {
break
}
- base.Errorf("arguments to copy have different element types: %L and string", n.Left.Type)
- n.Type = nil
+ base.Errorf("arguments to copy have different element types: %L and string", n.Left().Type())
+ n.SetType(nil)
return n
}
- if !n.Left.Type.IsSlice() || !n.Right.Type.IsSlice() {
- if !n.Left.Type.IsSlice() && !n.Right.Type.IsSlice() {
- base.Errorf("arguments to copy must be slices; have %L, %L", n.Left.Type, n.Right.Type)
- } else if !n.Left.Type.IsSlice() {
- base.Errorf("first argument to copy should be slice; have %L", n.Left.Type)
+ if !n.Left().Type().IsSlice() || !n.Right().Type().IsSlice() {
+ if !n.Left().Type().IsSlice() && !n.Right().Type().IsSlice() {
+ base.Errorf("arguments to copy must be slices; have %L, %L", n.Left().Type(), n.Right().Type())
+ } else if !n.Left().Type().IsSlice() {
+ base.Errorf("first argument to copy should be slice; have %L", n.Left().Type())
} else {
- base.Errorf("second argument to copy should be slice or string; have %L", n.Right.Type)
+ base.Errorf("second argument to copy should be slice or string; have %L", n.Right().Type())
}
- n.Type = nil
+ n.SetType(nil)
return n
}
- if !types.Identical(n.Left.Type.Elem(), n.Right.Type.Elem()) {
- base.Errorf("arguments to copy have different element types: %L and %L", n.Left.Type, n.Right.Type)
- n.Type = nil
+ if !types.Identical(n.Left().Type().Elem(), n.Right().Type().Elem()) {
+ base.Errorf("arguments to copy have different element types: %L and %L", n.Left().Type(), n.Right().Type())
+ n.SetType(nil)
return n
}
case ir.OCONV:
ok |= ctxExpr
- checkwidth(n.Type) // ensure width is calculated for backend
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = convlit1(n.Left, n.Type, true, nil)
- t := n.Left.Type
- if t == nil || n.Type == nil {
- n.Type = nil
- return n
- }
- op, why := convertop(n.Left.Op == ir.OLITERAL, t, n.Type)
- n.Op = op
- if n.Op == ir.OXXX {
- if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() {
- base.Errorf("cannot convert %L to type %v%s", n.Left, n.Type, why)
+ checkwidth(n.Type()) // ensure width is calculated for backend
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(convlit1(n.Left(), n.Type(), true, nil))
+ t := n.Left().Type()
+ if t == nil || n.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ op, why := convertop(n.Left().Op() == ir.OLITERAL, t, n.Type())
+ n.SetOp(op)
+ if n.Op() == ir.OXXX {
+ if !n.Diag() && !n.Type().Broke() && !n.Left().Diag() {
+ base.Errorf("cannot convert %L to type %v%s", n.Left(), n.Type(), why)
n.SetDiag(true)
}
- n.Op = ir.OCONV
- n.Type = nil
+ n.SetOp(ir.OCONV)
+ n.SetType(nil)
return n
}
- switch n.Op {
+ switch n.Op() {
case ir.OCONVNOP:
- if t.Etype == n.Type.Etype {
+ if t.Etype == n.Type().Etype {
switch t.Etype {
case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128:
// Floating point casts imply rounding and
// so the conversion must be kept.
- n.Op = ir.OCONV
+ n.SetOp(ir.OCONV)
}
}
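// Illustrative sketch: with "type Celsius float64", Celsius(x) for x of
// type float64 would otherwise be a no-op; keeping it as OCONV lets back
// ends that hold values in wider registers round to float64 here.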
break
case ir.OSTR2RUNES:
- if n.Left.Op == ir.OLITERAL {
+ if n.Left().Op() == ir.OLITERAL {
n = stringtoruneslit(n)
}
}
case ir.OMAKE:
ok |= ctxExpr
- args := n.List.Slice()
+ args := n.List().Slice()
if len(args) == 0 {
base.Errorf("missing argument to make")
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.List.Set(nil)
+ n.PtrList().Set(nil)
l := args[0]
l = typecheck(l, ctxType)
- t := l.Type
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
switch t.Etype {
default:
base.Errorf("cannot make type %v", t)
- n.Type = nil
+ n.SetType(nil)
return n
case types.TSLICE:
if i >= len(args) {
base.Errorf("missing len argument to make(%v)", t)
- n.Type = nil
+ n.SetType(nil)
return n
}
r = typecheck(r, ctxExpr)
}
- if l.Type == nil || (r != nil && r.Type == nil) {
- n.Type = nil
+ if l.Type() == nil || (r != nil && r.Type() == nil) {
+ n.SetType(nil)
return n
}
if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) {
- n.Type = nil
+ n.SetType(nil)
return n
}
if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) {
base.Errorf("len larger than cap in make(%v)", t)
- n.Type = nil
+ n.SetType(nil)
return n
}
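// Illustrative sketch: make([]int, 3, 2) is rejected by the constant
// comparison above before the node is lowered to OMAKESLICE.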
- n.Left = l
- n.Right = r
- n.Op = ir.OMAKESLICE
+ n.SetLeft(l)
+ n.SetRight(r)
+ n.SetOp(ir.OMAKESLICE)
case types.TMAP:
if i < len(args) {
i++
l = typecheck(l, ctxExpr)
l = defaultlit(l, types.Types[types.TINT])
- if l.Type == nil {
- n.Type = nil
+ if l.Type() == nil {
+ n.SetType(nil)
return n
}
if !checkmake(t, "size", &l) {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Left = l
+ n.SetLeft(l)
} else {
- n.Left = nodintconst(0)
+ n.SetLeft(nodintconst(0))
}
- n.Op = ir.OMAKEMAP
+ n.SetOp(ir.OMAKEMAP)
case types.TCHAN:
l = nil
i++
l = typecheck(l, ctxExpr)
l = defaultlit(l, types.Types[types.TINT])
- if l.Type == nil {
- n.Type = nil
+ if l.Type() == nil {
+ n.SetType(nil)
return n
}
if !checkmake(t, "buffer", &l) {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Left = l
+ n.SetLeft(l)
} else {
- n.Left = nodintconst(0)
+ n.SetLeft(nodintconst(0))
}
- n.Op = ir.OMAKECHAN
+ n.SetOp(ir.OMAKECHAN)
}
if i < len(args) {
base.Errorf("too many arguments to make(%v)", t)
- n.Op = ir.OMAKE
- n.Type = nil
+ n.SetOp(ir.OMAKE)
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
case ir.ONEW:
ok |= ctxExpr
- args := n.List
+ args := n.List()
if args.Len() == 0 {
base.Errorf("missing argument to new")
- n.Type = nil
+ n.SetType(nil)
return n
}
l := args.First()
l = typecheck(l, ctxType)
- t := l.Type
+ t := l.Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if args.Len() > 1 {
base.Errorf("too many arguments to new(%v)", t)
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Left = l
- n.Type = types.NewPtr(t)
+ n.SetLeft(l)
+ n.SetType(types.NewPtr(t))
case ir.OPRINT, ir.OPRINTN:
ok |= ctxStmt
typecheckargs(n)
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
// Special case for print: int constant is int64, not int.
if ir.IsConst(n1, constant.Int) {
case ir.OPANIC:
ok |= ctxStmt
if !onearg(n, "panic") {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, types.Types[types.TINTER])
- if n.Left.Type == nil {
- n.Type = nil
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), types.Types[types.TINTER]))
+ if n.Left().Type() == nil {
+ n.SetType(nil)
return n
}
case ir.ORECOVER:
ok |= ctxExpr | ctxStmt
- if n.List.Len() != 0 {
+ if n.List().Len() != 0 {
base.Errorf("too many arguments to recover")
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Type = types.Types[types.TINTER]
+ n.SetType(types.Types[types.TINTER])
case ir.OCLOSURE:
ok |= ctxExpr
typecheckclosure(n, top)
- if n.Type == nil {
+ if n.Type() == nil {
return n
}
case ir.OITAB:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- t := n.Left.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ t := n.Left().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsInterface() {
base.Fatalf("OITAB of %v", t)
}
- n.Type = types.NewPtr(types.Types[types.TUINTPTR])
+ n.SetType(types.NewPtr(types.Types[types.TUINTPTR]))
case ir.OIDATA:
// Whoever creates the OIDATA node must know a priori the concrete type at that moment,
case ir.OSPTR:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- t := n.Left.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ t := n.Left().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
if !t.IsSlice() && !t.IsString() {
base.Fatalf("OSPTR of %v", t)
}
if t.IsString() {
- n.Type = types.NewPtr(types.Types[types.TUINT8])
+ n.SetType(types.NewPtr(types.Types[types.TUINT8]))
} else {
- n.Type = types.NewPtr(t.Elem())
+ n.SetType(types.NewPtr(t.Elem()))
}
case ir.OCLOSUREVAR:
case ir.OCFUNC:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
- n.Type = types.Types[types.TUINTPTR]
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetType(types.Types[types.TUINTPTR])
case ir.OCONVNOP:
ok |= ctxExpr
- n.Left = typecheck(n.Left, ctxExpr)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
// statements
case ir.OAS:
typecheckas(n)
// Code that creates temps does not bother to set defn, so do it here.
- if n.Left.Op == ir.ONAME && ir.IsAutoTmp(n.Left) {
- n.Left.Name.Defn = n
+ if n.Left().Op() == ir.ONAME && ir.IsAutoTmp(n.Left()) {
+ n.Left().Name().Defn = n
}
case ir.OAS2:
case ir.OLABEL:
ok |= ctxStmt
decldepth++
- if n.Sym.IsBlank() {
+ if n.Sym().IsBlank() {
// Empty identifier is valid but useless.
// Eliminate now to simplify life later.
// See issues 7538, 11589, 11593.
- n.Op = ir.OEMPTY
- n.Left = nil
+ n.SetOp(ir.OEMPTY)
+ n.SetLeft(nil)
}
case ir.ODEFER:
ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxStmt|ctxExpr)
- if !n.Left.Diag() {
+ n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr))
+ if !n.Left().Diag() {
checkdefergo(n)
}
case ir.OGO:
ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxStmt|ctxExpr)
+ n.SetLeft(typecheck(n.Left(), ctxStmt|ctxExpr))
checkdefergo(n)
case ir.OFOR, ir.OFORUNTIL:
ok |= ctxStmt
- typecheckslice(n.Ninit.Slice(), ctxStmt)
+ typecheckslice(n.Init().Slice(), ctxStmt)
decldepth++
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- if n.Left != nil {
- t := n.Left.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ if n.Left() != nil {
+ t := n.Left().Type()
if t != nil && !t.IsBoolean() {
- base.Errorf("non-bool %L used as for condition", n.Left)
+ base.Errorf("non-bool %L used as for condition", n.Left())
}
}
- n.Right = typecheck(n.Right, ctxStmt)
- if n.Op == ir.OFORUNTIL {
- typecheckslice(n.List.Slice(), ctxStmt)
+ n.SetRight(typecheck(n.Right(), ctxStmt))
+ if n.Op() == ir.OFORUNTIL {
+ typecheckslice(n.List().Slice(), ctxStmt)
}
- typecheckslice(n.Nbody.Slice(), ctxStmt)
+ typecheckslice(n.Body().Slice(), ctxStmt)
decldepth--
case ir.OIF:
ok |= ctxStmt
- typecheckslice(n.Ninit.Slice(), ctxStmt)
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- if n.Left != nil {
- t := n.Left.Type
+ typecheckslice(n.Init().Slice(), ctxStmt)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ if n.Left() != nil {
+ t := n.Left().Type()
if t != nil && !t.IsBoolean() {
- base.Errorf("non-bool %L used as if condition", n.Left)
+ base.Errorf("non-bool %L used as if condition", n.Left())
}
}
- typecheckslice(n.Nbody.Slice(), ctxStmt)
- typecheckslice(n.Rlist.Slice(), ctxStmt)
+ typecheckslice(n.Body().Slice(), ctxStmt)
+ typecheckslice(n.Rlist().Slice(), ctxStmt)
case ir.ORETURN:
ok |= ctxStmt
typecheckargs(n)
if Curfn == nil {
base.Errorf("return outside function")
- n.Type = nil
+ n.SetType(nil)
return n
}
- if Curfn.Type.FuncType().Outnamed && n.List.Len() == 0 {
+ if Curfn.Type().FuncType().Outnamed && n.List().Len() == 0 {
break
}
- typecheckaste(ir.ORETURN, nil, false, Curfn.Type.Results(), n.List, func() string { return "return argument" })
+ typecheckaste(ir.ORETURN, nil, false, Curfn.Type().Results(), n.List(), func() string { return "return argument" })
case ir.ORETJMP:
ok |= ctxStmt
case ir.OTYPESW:
base.Errorf("use of .(type) outside type switch")
- n.Type = nil
+ n.SetType(nil)
return n
case ir.ODCLFUNC:
case ir.ODCLCONST:
ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxExpr)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
case ir.ODCLTYPE:
ok |= ctxStmt
- n.Left = typecheck(n.Left, ctxType)
- checkwidth(n.Left.Type)
+ n.SetLeft(typecheck(n.Left(), ctxType))
+ checkwidth(n.Left().Type())
}
- t := n.Type
- if t != nil && !t.IsFuncArgStruct() && n.Op != ir.OTYPE {
+ t := n.Type()
+ if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE {
switch t.Etype {
case types.TFUNC, // might have TANY; wait until it's called
types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK:
}
n = evalConst(n)
- if n.Op == ir.OTYPE && top&ctxType == 0 {
- if !n.Type.Broke() {
- base.Errorf("type %v is not an expression", n.Type)
+ if n.Op() == ir.OTYPE && top&ctxType == 0 {
+ if !n.Type().Broke() {
+ base.Errorf("type %v is not an expression", n.Type())
}
- n.Type = nil
+ n.SetType(nil)
return n
}
- if top&(ctxExpr|ctxType) == ctxType && n.Op != ir.OTYPE {
+ if top&(ctxExpr|ctxType) == ctxType && n.Op() != ir.OTYPE {
base.Errorf("%v is not a type", n)
- n.Type = nil
+ n.SetType(nil)
return n
}
// TODO(rsc): simplify
if (top&(ctxCallee|ctxExpr|ctxType) != 0) && top&ctxStmt == 0 && ok&(ctxExpr|ctxType|ctxCallee) == 0 {
base.Errorf("%v used as value", n)
- n.Type = nil
+ n.SetType(nil)
return n
}
n.SetDiag(true)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
}
func typecheckargs(n *ir.Node) {
- if n.List.Len() != 1 || n.IsDDD() {
- typecheckslice(n.List.Slice(), ctxExpr)
+ if n.List().Len() != 1 || n.IsDDD() {
+ typecheckslice(n.List().Slice(), ctxExpr)
return
}
- typecheckslice(n.List.Slice(), ctxExpr|ctxMultiOK)
- t := n.List.First().Type
+ typecheckslice(n.List().Slice(), ctxExpr|ctxMultiOK)
+ t := n.List().First().Type()
if t == nil || !t.IsFuncArgStruct() {
return
}
// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
// Save n as n.Orig for fmt.go.
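// Illustrative sketch: for f(g()) with g returning (int, error), the
// rewrite produces
//   t1, t2 := g() // an OAS2 attached to the call's init list
//   f(t1, t2)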
- if n.Orig == n {
- n.Orig = ir.SepCopy(n)
+ if n.Orig() == n {
+ n.SetOrig(ir.SepCopy(n))
}
as := ir.Nod(ir.OAS2, nil, nil)
- as.Rlist.AppendNodes(&n.List)
+ as.PtrRlist().AppendNodes(n.PtrList())
// If we're outside of function context, then this call will
// be executed during the generated init function. However,
}
for _, f := range t.FieldSlice() {
t := temp(f.Type)
- as.Ninit.Append(ir.Nod(ir.ODCL, t, nil))
- as.List.Append(t)
- n.List.Append(t)
+ as.PtrInit().Append(ir.Nod(ir.ODCL, t, nil))
+ as.PtrList().Append(t)
+ n.PtrList().Append(t)
}
if static {
Curfn = nil
}
as = typecheck(as, ctxStmt)
- n.Ninit.Append(as)
+ n.PtrInit().Append(as)
}
func checksliceindex(l *ir.Node, r *ir.Node, tp *types.Type) bool {
- t := r.Type
+ t := r.Type()
if t == nil {
return false
}
return false
}
- if r.Op == ir.OLITERAL {
+ if r.Op() == ir.OLITERAL {
x := r.Val()
if constant.Sign(x) < 0 {
base.Errorf("invalid slice index %v (index must be non-negative)", r)
}
func checksliceconst(lo *ir.Node, hi *ir.Node) bool {
- if lo != nil && hi != nil && lo.Op == ir.OLITERAL && hi.Op == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) {
+ if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) {
base.Errorf("invalid slice index: %v > %v", lo, hi)
return false
}
func checkdefergo(n *ir.Node) {
what := "defer"
- if n.Op == ir.OGO {
+ if n.Op() == ir.OGO {
what = "go"
}
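// Illustrative sketch: "defer int(x)" parses as a call but typechecks to
// a conversion, so it falls past the switch below and is rejected with
// "defer requires function call, not conversion".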
- switch n.Left.Op {
+ switch n.Left().Op() {
// ok
case ir.OCALLINTER,
ir.OCALLMETH,
ir.ONEW,
ir.OREAL,
ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
- if n.Left.Orig != nil && n.Left.Orig.Op == ir.OCONV {
+ if n.Left().Orig() != nil && n.Left().Orig().Op() == ir.OCONV {
break
}
- base.ErrorfAt(n.Pos, "%s discards result of %v", what, n.Left)
+ base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Left())
return
}
// type is broken or missing, most likely a method call on a broken type
// we will warn about the broken type elsewhere. no need to emit a potentially confusing error
- if n.Left.Type == nil || n.Left.Type.Broke() {
+ if n.Left().Type() == nil || n.Left().Type().Broke() {
return
}
// The syntax made sure it was a call, so this must be
// a conversion.
n.SetDiag(true)
- base.ErrorfAt(n.Pos, "%s requires function call, not conversion", what)
+ base.ErrorfAt(n.Pos(), "%s requires function call, not conversion", what)
}
}
// n.Left = implicitstar(n.Left)
func implicitstar(n *ir.Node) *ir.Node {
// insert implicit * if needed for fixed array
- t := n.Type
+ t := n.Type()
if t == nil || !t.IsPtr() {
return n
}
}
func onearg(n *ir.Node, f string, args ...interface{}) bool {
- if n.Left != nil {
+ if n.Left() != nil {
return true
}
- if n.List.Len() == 0 {
+ if n.List().Len() == 0 {
p := fmt.Sprintf(f, args...)
base.Errorf("missing argument to %s: %v", p, n)
return false
}
- if n.List.Len() > 1 {
+ if n.List().Len() > 1 {
p := fmt.Sprintf(f, args...)
base.Errorf("too many arguments to %s: %v", p, n)
- n.Left = n.List.First()
- n.List.Set(nil)
+ n.SetLeft(n.List().First())
+ n.PtrList().Set(nil)
return false
}
- n.Left = n.List.First()
- n.List.Set(nil)
+ n.SetLeft(n.List().First())
+ n.PtrList().Set(nil)
return true
}
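// twoarg moves a builtin call's two n.List operands into Left and Right,
// reporting arity errors; builtins such as complex and copy are routed
// through it (see the OCOPY case above).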
func twoarg(n *ir.Node) bool {
- if n.Left != nil {
+ if n.Left() != nil {
return true
}
- if n.List.Len() != 2 {
- if n.List.Len() < 2 {
+ if n.List().Len() != 2 {
+ if n.List().Len() < 2 {
base.Errorf("not enough arguments in call to %v", n)
} else {
base.Errorf("too many arguments in call to %v", n)
}
return false
}
- n.Left = n.List.First()
- n.Right = n.List.Second()
- n.List.Set(nil)
+ n.SetLeft(n.List().First())
+ n.SetRight(n.List().Second())
+ n.PtrList().Set(nil)
return true
}
defer tracePrint("typecheckMethodExpr", n)(&res)
}
- t := n.Left.Type
+ t := n.Left().Type()
// Compute the method set for t.
var ms *types.Fields
} else {
mt := methtype(t)
if mt == nil {
- base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sym)
- n.Type = nil
+ base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sym())
+ n.SetType(nil)
return n
}
expandmeth(mt)
}
}
- s := n.Sym
+ s := n.Sym()
m := lookdot1(n, s, t, ms, 0)
if m == nil {
if lookdot1(n, s, t, ms, 1) != nil {
} else {
base.Errorf("%v undefined (type %v has no method %v)", n, t, s)
}
- n.Type = nil
+ n.SetType(nil)
return n
}
if !isMethodApplicable(t, m) {
base.Errorf("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Op = ir.OMETHEXPR
- if n.Name == nil {
- n.Name = new(ir.Name)
+ n.SetOp(ir.OMETHEXPR)
+ if n.Name() == nil {
+ n.SetName(new(ir.Name))
}
- n.Right = NewName(n.Sym)
- n.Sym = methodSym(t, n.Sym)
- n.Type = methodfunc(m.Type, n.Left.Type)
- n.Xoffset = 0
+ n.SetRight(NewName(n.Sym()))
+ n.SetSym(methodSym(t, n.Sym()))
+ n.SetType(methodfunc(m.Type, n.Left().Type()))
+ n.SetOffset(0)
n.SetClass(ir.PFUNC)
n.SetOpt(m)
// methodSym already marked n.Sym as a function.
// Issue 25065. Make sure that we emit the symbol for a local method.
if base.Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == ir.LocalPkg) {
- makefuncsym(n.Sym)
+ makefuncsym(n.Sym())
}
return n
}
func lookdot(n *ir.Node, t *types.Type, dostrcmp int) *types.Field {
- s := n.Sym
+ s := n.Sym()
dowidth(t)
var f1 *types.Field
}
var f2 *types.Field
- if n.Left.Type == t || n.Left.Type.Sym == nil {
+ if n.Left().Type() == t || n.Left().Type().Sym == nil {
mt := methtype(t)
if mt != nil {
f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp)
return f1
}
if f2 != nil {
- base.Errorf("%v is both field and method", n.Sym)
+ base.Errorf("%v is both field and method", n.Sym())
}
if f1.Offset == types.BADWIDTH {
base.Fatalf("lookdot badwidth %v %p", f1, f1)
}
- n.Xoffset = f1.Offset
- n.Type = f1.Type
+ n.SetOffset(f1.Offset)
+ n.SetType(f1.Type)
if t.IsInterface() {
- if n.Left.Type.IsPtr() {
- n.Left = ir.Nod(ir.ODEREF, n.Left, nil) // implicitstar
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxExpr)
+ if n.Left().Type().IsPtr() {
+ n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil)) // implicitstar
+ n.Left().SetImplicit(true)
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
}
- n.Op = ir.ODOTINTER
+ n.SetOp(ir.ODOTINTER)
} else {
n.SetOpt(f1)
}
// Already in the process of diagnosing an error.
return f2
}
- tt := n.Left.Type
+ tt := n.Left().Type()
dowidth(tt)
rcvr := f2.Type.Recv().Type
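// Illustrative sketch of the adjustment below: calling v.M() where M has
// receiver *T inserts an implicit &v, and p.M() with receiver T and p of
// type *T inserts an implicit *p, per the usual method-call shorthand.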
if !types.Identical(rcvr, tt) {
if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
- checklvalue(n.Left, "call pointer method on")
- n.Left = ir.Nod(ir.OADDR, n.Left, nil)
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ checklvalue(n.Left(), "call pointer method on")
+ n.SetLeft(ir.Nod(ir.OADDR, n.Left(), nil))
+ n.Left().SetImplicit(true)
+ n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
} else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
- n.Left = ir.Nod(ir.ODEREF, n.Left, nil)
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil))
+ n.Left().SetImplicit(true)
+ n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
} else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
- base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left)
+ base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sym(), n.Left())
for tt.IsPtr() {
// Stop one level early for method with pointer receiver.
if rcvr.IsPtr() && !tt.Elem().IsPtr() {
break
}
- n.Left = ir.Nod(ir.ODEREF, n.Left, nil)
- n.Left.SetImplicit(true)
- n.Left = typecheck(n.Left, ctxType|ctxExpr)
+ n.SetLeft(ir.Nod(ir.ODEREF, n.Left(), nil))
+ n.Left().SetImplicit(true)
+ n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
tt = tt.Elem()
}
} else {
}
pll := n
- ll := n.Left
- for ll.Left != nil && (ll.Op == ir.ODOT || ll.Op == ir.ODOTPTR || ll.Op == ir.ODEREF) {
+ ll := n.Left()
+ for ll.Left() != nil && (ll.Op() == ir.ODOT || ll.Op() == ir.ODOTPTR || ll.Op() == ir.ODEREF) {
pll = ll
- ll = ll.Left
+ ll = ll.Left()
}
- if pll.Implicit() && ll.Type.IsPtr() && ll.Type.Sym != nil && ir.AsNode(ll.Type.Sym.Def) != nil && ir.AsNode(ll.Type.Sym.Def).Op == ir.OTYPE {
+ if pll.Implicit() && ll.Type().IsPtr() && ll.Type().Sym != nil && ir.AsNode(ll.Type().Sym.Def) != nil && ir.AsNode(ll.Type().Sym.Def).Op() == ir.OTYPE {
// It is invalid to automatically dereference a named pointer type when selecting a method.
// Make n.Left == ll to clarify error message.
- n.Left = ll
+ n.SetLeft(ll)
return nil
}
- n.Sym = methodSym(n.Left.Type, f2.Sym)
- n.Xoffset = f2.Offset
- n.Type = f2.Type
- n.Op = ir.ODOTMETH
+ n.SetSym(methodSym(n.Left().Type(), f2.Sym))
+ n.SetOffset(f2.Offset)
+ n.SetType(f2.Type)
+ n.SetOp(ir.ODOTMETH)
n.SetOpt(f2)
return f2
func nokeys(l ir.Nodes) bool {
for _, n := range l.Slice() {
- if n.Op == ir.OKEY || n.Op == ir.OSTRUCTKEY {
+ if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY {
return false
}
}
}
n = nl.Index(i)
setlineno(n)
- if n.Type != nil {
+ if n.Type() != nil {
nl.SetIndex(i, assignconvfn(n, t, desc))
}
return
for ; i < nl.Len(); i++ {
n = nl.Index(i)
setlineno(n)
- if n.Type != nil {
+ if n.Type() != nil {
nl.SetIndex(i, assignconvfn(n, t.Elem(), desc))
}
}
}
n = nl.Index(i)
setlineno(n)
- if n.Type != nil {
+ if n.Type() != nil {
nl.SetIndex(i, assignconvfn(n, t, desc))
}
i++
return
notenough:
- if n == nil || (!n.Diag() && n.Type != nil) {
+ if n == nil || (!n.Diag() && n.Type() != nil) {
details := errorDetails(nl, tstruct, isddd)
if call != nil {
// call is the expression being called, not the overall call.
// Method expressions have the form T.M, and the compiler has
// rewritten those to ONAME nodes but left T in Left.
- if call.Op == ir.OMETHEXPR {
+ if call.Op() == ir.OMETHEXPR {
base.Errorf("not enough arguments in call to method expression %v%s", call, details)
} else {
base.Errorf("not enough arguments in call to %v%s", call, details)
}
// If any node has an unknown type, suppress it as well
for _, n := range nl.Slice() {
- if n.Type == nil {
+ if n.Type() == nil {
return ""
}
}
var typeStrings []string
for i, n := range nl.Slice() {
isdddArg := isddd && i == nl.Len()-1
- typeStrings = append(typeStrings, sigrepr(n.Type, isdddArg))
+ typeStrings = append(typeStrings, sigrepr(n.Type(), isdddArg))
}
return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
// pushtype adds elided type information for composite literals if
// appropriate, and returns the resulting expression.
func pushtype(n *ir.Node, t *types.Type) *ir.Node {
- if n == nil || n.Op != ir.OCOMPLIT || n.Right != nil {
+ if n == nil || n.Op() != ir.OCOMPLIT || n.Right() != nil {
return n
}
switch {
case iscomptype(t):
// For T, return T{...}.
- n.Right = typenod(t)
+ n.SetRight(typenod(t))
case t.IsPtr() && iscomptype(t.Elem()):
// For *T, return &T{...}.
- n.Right = typenod(t.Elem())
+ n.SetRight(typenod(t.Elem()))
- n = ir.NodAt(n.Pos, ir.OADDR, n, nil)
+ n = ir.NodAt(n.Pos(), ir.OADDR, n, nil)
n.SetImplicit(true)
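// Illustrative sketch: the inner {1, 2} in []*Point{{1, 2}} becomes
// &Point{1, 2} with an implicit address-of.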
}
base.Pos = lno
}()
- if n.Right == nil {
- base.ErrorfAt(n.Pos, "missing type in composite literal")
- n.Type = nil
+ if n.Right() == nil {
+ base.ErrorfAt(n.Pos(), "missing type in composite literal")
+ n.SetType(nil)
return n
}
// Save original node (including n.Right)
- n.Orig = ir.Copy(n)
+ n.SetOrig(ir.Copy(n))
- setlineno(n.Right)
+ setlineno(n.Right())
// Need to handle [...]T arrays specially.
- if n.Right.Op == ir.OTARRAY && n.Right.Left != nil && n.Right.Left.Op == ir.ODDD {
- n.Right.Right = typecheck(n.Right.Right, ctxType)
- if n.Right.Right.Type == nil {
- n.Type = nil
+ if n.Right().Op() == ir.OTARRAY && n.Right().Left() != nil && n.Right().Left().Op() == ir.ODDD {
+ n.Right().SetRight(typecheck(n.Right().Right(), ctxType))
+ if n.Right().Right().Type() == nil {
+ n.SetType(nil)
return n
}
- elemType := n.Right.Right.Type
+ elemType := n.Right().Right().Type()
- length := typecheckarraylit(elemType, -1, n.List.Slice(), "array literal")
+ length := typecheckarraylit(elemType, -1, n.List().Slice(), "array literal")
- n.Op = ir.OARRAYLIT
- n.Type = types.NewArray(elemType, length)
- n.Right = nil
+ n.SetOp(ir.OARRAYLIT)
+ n.SetType(types.NewArray(elemType, length))
+ n.SetRight(nil)
return n
}
- n.Right = typecheck(n.Right, ctxType)
- t := n.Right.Type
+ n.SetRight(typecheck(n.Right(), ctxType))
+ t := n.Right().Type()
if t == nil {
- n.Type = nil
+ n.SetType(nil)
return n
}
- n.Type = t
+ n.SetType(t)
switch t.Etype {
default:
base.Errorf("invalid composite literal type %v", t)
- n.Type = nil
+ n.SetType(nil)
case types.TARRAY:
- typecheckarraylit(t.Elem(), t.NumElem(), n.List.Slice(), "array literal")
- n.Op = ir.OARRAYLIT
- n.Right = nil
+ typecheckarraylit(t.Elem(), t.NumElem(), n.List().Slice(), "array literal")
+ n.SetOp(ir.OARRAYLIT)
+ n.SetRight(nil)
case types.TSLICE:
- length := typecheckarraylit(t.Elem(), -1, n.List.Slice(), "slice literal")
- n.Op = ir.OSLICELIT
- n.Right = nodintconst(length)
+ length := typecheckarraylit(t.Elem(), -1, n.List().Slice(), "slice literal")
+ n.SetOp(ir.OSLICELIT)
+ n.SetRight(nodintconst(length))
case types.TMAP:
var cs constSet
- for i3, l := range n.List.Slice() {
+ for i3, l := range n.List().Slice() {
setlineno(l)
- if l.Op != ir.OKEY {
- n.List.SetIndex(i3, typecheck(l, ctxExpr))
+ if l.Op() != ir.OKEY {
+ n.List().SetIndex(i3, typecheck(l, ctxExpr))
base.Errorf("missing key in map literal")
continue
}
- r := l.Left
+ r := l.Left()
r = pushtype(r, t.Key())
r = typecheck(r, ctxExpr)
- l.Left = assignconv(r, t.Key(), "map key")
- cs.add(base.Pos, l.Left, "key", "map literal")
+ l.SetLeft(assignconv(r, t.Key(), "map key"))
+ cs.add(base.Pos, l.Left(), "key", "map literal")
- r = l.Right
+ r = l.Right()
r = pushtype(r, t.Elem())
r = typecheck(r, ctxExpr)
- l.Right = assignconv(r, t.Elem(), "map value")
+ l.SetRight(assignconv(r, t.Elem(), "map value"))
}
- n.Op = ir.OMAPLIT
- n.Right = nil
+ n.SetOp(ir.OMAPLIT)
+ n.SetRight(nil)
case types.TSTRUCT:
// Need valid field offsets for Xoffset below.
dowidth(t)
errored := false
- if n.List.Len() != 0 && nokeys(n.List) {
+ if n.List().Len() != 0 && nokeys(n.List()) {
// simple list of variables
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i, n1 := range ls {
setlineno(n1)
n1 = typecheck(n1, ctxExpr)
// No pushtype allowed here. Must name fields for that.
n1 = assignconv(n1, f.Type, "field value")
n1 = nodSym(ir.OSTRUCTKEY, n1, f.Sym)
- n1.Xoffset = f.Offset
+ n1.SetOffset(f.Offset)
ls[i] = n1
}
if len(ls) < t.NumFields() {
hash := make(map[string]bool)
// keyed list
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i, l := range ls {
setlineno(l)
- if l.Op == ir.OKEY {
- key := l.Left
+ if l.Op() == ir.OKEY {
+ key := l.Left()
- l.Op = ir.OSTRUCTKEY
- l.Left = l.Right
- l.Right = nil
+ l.SetOp(ir.OSTRUCTKEY)
+ l.SetLeft(l.Right())
+ l.SetRight(nil)
// An OXDOT uses the Sym field to hold
// the field to the right of the dot,
// so s will be non-nil, but an OXDOT
// is never a valid struct literal key.
- if key.Sym == nil || key.Op == ir.OXDOT || key.Sym.IsBlank() {
+ if key.Sym() == nil || key.Op() == ir.OXDOT || key.Sym().IsBlank() {
base.Errorf("invalid field name %v in struct initializer", key)
- l.Left = typecheck(l.Left, ctxExpr)
+ l.SetLeft(typecheck(l.Left(), ctxExpr))
continue
}
// Sym might have resolved to name in other top-level
// package, because of import dot. Redirect to correct sym
// before we do the lookup.
- s := key.Sym
+ s := key.Sym()
if s.Pkg != ir.LocalPkg && types.IsExported(s.Name) {
s1 := lookup(s.Name)
if s1.Origpkg == s.Pkg {
s = s1
}
}
- l.Sym = s
+ l.SetSym(s)
}
- if l.Op != ir.OSTRUCTKEY {
+ if l.Op() != ir.OSTRUCTKEY {
if !errored {
base.Errorf("mixture of field:value and value initializers")
errored = true
continue
}
- f := lookdot1(nil, l.Sym, t, t.Fields(), 0)
+ f := lookdot1(nil, l.Sym(), t, t.Fields(), 0)
if f == nil {
- if ci := lookdot1(nil, l.Sym, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
+ if ci := lookdot1(nil, l.Sym(), t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
if visible(ci.Sym) {
- base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym, t, ci.Sym)
- } else if nonexported(l.Sym) && l.Sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
- base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym, t)
+ base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym(), t, ci.Sym)
+ } else if nonexported(l.Sym()) && l.Sym().Name == ci.Sym.Name { // Ensure exactness before the suggestion.
+ base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Sym(), t)
} else {
- base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym, t)
+ base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym(), t)
}
continue
}
var f *types.Field
- p, _ := dotpath(l.Sym, t, &f, true)
+ p, _ := dotpath(l.Sym(), t, &f, true)
if p == nil || f.IsMethod() {
- base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym, t)
+ base.Errorf("unknown field '%v' in struct literal of type %v", l.Sym(), t)
continue
}
// dotpath returns the parent embedded types in reverse order.
for ei := len(p) - 1; ei >= 0; ei-- {
ep = append(ep, p[ei].field.Sym.Name)
}
- ep = append(ep, l.Sym.Name)
+ ep = append(ep, l.Sym().Name)
base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
continue
}
fielddup(f.Sym.Name, hash)
- l.Xoffset = f.Offset
+ l.SetOffset(f.Offset)
// No pushtype allowed here. Tried and rejected.
- l.Left = typecheck(l.Left, ctxExpr)
- l.Left = assignconv(l.Left, f.Type, "field value")
+ l.SetLeft(typecheck(l.Left(), ctxExpr))
+ l.SetLeft(assignconv(l.Left(), f.Type, "field value"))
}
}
- n.Op = ir.OSTRUCTLIT
- n.Right = nil
+ n.SetOp(ir.OSTRUCTLIT)
+ n.SetRight(nil)
}
return n
// keys so we can check for duplicate indices.
var indices map[int64]bool
for _, elt := range elts {
- if elt.Op == ir.OKEY {
+ if elt.Op() == ir.OKEY {
indices = make(map[int64]bool)
break
}
setlineno(elt)
r := elts[i]
var kv *ir.Node
- if elt.Op == ir.OKEY {
- elt.Left = typecheck(elt.Left, ctxExpr)
- key = indexconst(elt.Left)
+ if elt.Op() == ir.OKEY {
+ elt.SetLeft(typecheck(elt.Left(), ctxExpr))
+ key = indexconst(elt.Left())
if key < 0 {
- if !elt.Left.Diag() {
+ if !elt.Left().Diag() {
if key == -2 {
base.Errorf("index too large")
} else {
base.Errorf("index must be non-negative integer constant")
}
- elt.Left.SetDiag(true)
+ elt.Left().SetDiag(true)
}
key = -(1 << 30) // stay negative for a while
}
kv = elt
- r = elt.Right
+ r = elt.Right()
}
r = pushtype(r, elemType)
r = typecheck(r, ctxExpr)
r = assignconv(r, elemType, ctx)
if kv != nil {
- kv.Right = r
+ kv.SetRight(r)
} else {
elts[i] = r
}
// lvalue etc
func islvalue(n *ir.Node) bool {
- switch n.Op {
+ switch n.Op() {
case ir.OINDEX:
- if n.Left.Type != nil && n.Left.Type.IsArray() {
- return islvalue(n.Left)
+ if n.Left().Type() != nil && n.Left().Type().IsArray() {
+ return islvalue(n.Left())
}
- if n.Left.Type != nil && n.Left.Type.IsString() {
+ if n.Left().Type() != nil && n.Left().Type().IsString() {
return false
}
fallthrough
return true
case ir.ODOT:
- return islvalue(n.Left)
+ return islvalue(n.Left())
case ir.ONAME:
if n.Class() == ir.PFUNC {
func checkassign(stmt *ir.Node, n *ir.Node) {
// Variables declared in ORANGE are assigned on every iteration.
- if n.Name == nil || n.Name.Defn != stmt || stmt.Op == ir.ORANGE {
+ if n.Name() == nil || n.Name().Defn != stmt || stmt.Op() == ir.ORANGE {
r := outervalue(n)
- if r.Op == ir.ONAME {
- r.Name.SetAssigned(true)
- if r.Name.IsClosureVar() {
- r.Name.Defn.Name.SetAssigned(true)
+ if r.Op() == ir.ONAME {
+ r.Name().SetAssigned(true)
+ if r.Name().IsClosureVar() {
+ r.Name().Defn.Name().SetAssigned(true)
}
}
}
if islvalue(n) {
return
}
- if n.Op == ir.OINDEXMAP {
+ if n.Op() == ir.OINDEXMAP {
n.SetIndexMapLValue(true)
return
}
// have already complained about n being invalid
- if n.Type == nil {
+ if n.Type() == nil {
return
}
switch {
- case n.Op == ir.ODOT && n.Left.Op == ir.OINDEXMAP:
+ case n.Op() == ir.ODOT && n.Left().Op() == ir.OINDEXMAP:
base.Errorf("cannot assign to struct field %v in map", n)
- case (n.Op == ir.OINDEX && n.Left.Type.IsString()) || n.Op == ir.OSLICESTR:
+ case (n.Op() == ir.OINDEX && n.Left().Type().IsString()) || n.Op() == ir.OSLICESTR:
base.Errorf("cannot assign to %v (strings are immutable)", n)
- case n.Op == ir.OLITERAL && n.Sym != nil && isGoConst(n):
+ case n.Op() == ir.OLITERAL && n.Sym() != nil && isGoConst(n):
base.Errorf("cannot assign to %v (declared const)", n)
default:
base.Errorf("cannot assign to %v", n)
}
- n.Type = nil
+ n.SetType(nil)
}
func checkassignlist(stmt *ir.Node, l ir.Nodes) {
// lvalue expression is for OSLICE and OAPPEND optimizations, and it
// is correct in those settings.
func samesafeexpr(l *ir.Node, r *ir.Node) bool {
- if l.Op != r.Op || !types.Identical(l.Type, r.Type) {
+ if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) {
return false
}
- switch l.Op {
+ switch l.Op() {
case ir.ONAME, ir.OCLOSUREVAR:
return l == r
case ir.ODOT, ir.ODOTPTR:
- return l.Sym != nil && r.Sym != nil && l.Sym == r.Sym && samesafeexpr(l.Left, r.Left)
+ return l.Sym() != nil && r.Sym() != nil && l.Sym() == r.Sym() && samesafeexpr(l.Left(), r.Left())
case ir.ODEREF, ir.OCONVNOP,
ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG:
- return samesafeexpr(l.Left, r.Left)
+ return samesafeexpr(l.Left(), r.Left())
case ir.OCONV:
// Some conversions can't be reused, such as []byte(str).
// Allow only numeric-ish types. This is a bit conservative.
- return issimple[l.Type.Etype] && samesafeexpr(l.Left, r.Left)
+ return issimple[l.Type().Etype] && samesafeexpr(l.Left(), r.Left())
case ir.OINDEX, ir.OINDEXMAP,
ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
- return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right)
+ return samesafeexpr(l.Left(), r.Left()) && samesafeexpr(l.Right(), r.Right())
case ir.OLITERAL:
return constant.Compare(l.Val(), token.EQL, r.Val())
// if the variable has a type (ntype) then typechecking
// will not look at defn, so it is okay (and desirable,
// so that the conversion below happens).
- n.Left = resolve(n.Left)
+ n.SetLeft(resolve(n.Left()))
- if n.Left.Name == nil || n.Left.Name.Defn != n || n.Left.Name.Param.Ntype != nil {
- n.Left = typecheck(n.Left, ctxExpr|ctxAssign)
+ if n.Left().Name() == nil || n.Left().Name().Defn != n || n.Left().Name().Param.Ntype != nil {
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign))
}
// Use ctxMultiOK so we can emit an "N variables but M values" error
// to be consistent with typecheckas2 (#26616).
- n.Right = typecheck(n.Right, ctxExpr|ctxMultiOK)
- checkassign(n, n.Left)
- if n.Right != nil && n.Right.Type != nil {
- if n.Right.Type.IsFuncArgStruct() {
- base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right.Left, n.Right.Type.NumFields())
+ n.SetRight(typecheck(n.Right(), ctxExpr|ctxMultiOK))
+ checkassign(n, n.Left())
+ if n.Right() != nil && n.Right().Type() != nil {
+ if n.Right().Type().IsFuncArgStruct() {
+ base.Errorf("assignment mismatch: 1 variable but %v returns %d values", n.Right().Left(), n.Right().Type().NumFields())
// Multi-value RHS isn't actually valid for OAS; nil out
// to indicate failed typechecking.
- n.Right.Type = nil
- } else if n.Left.Type != nil {
- n.Right = assignconv(n.Right, n.Left.Type, "assignment")
+ n.Right().SetType(nil)
+ } else if n.Left().Type() != nil {
+ n.SetRight(assignconv(n.Right(), n.Left().Type(), "assignment"))
}
}
- if n.Left.Name != nil && n.Left.Name.Defn == n && n.Left.Name.Param.Ntype == nil {
- n.Right = defaultlit(n.Right, nil)
- n.Left.Type = n.Right.Type
+ if n.Left().Name() != nil && n.Left().Name().Defn == n && n.Left().Name().Param.Ntype == nil {
+ n.SetRight(defaultlit(n.Right(), nil))
+ n.Left().SetType(n.Right().Type())
}
// second half of dance.
// just to get it over with. see dance above.
n.SetTypecheck(1)
- if n.Left.Typecheck() == 0 {
- n.Left = typecheck(n.Left, ctxExpr|ctxAssign)
+ if n.Left().Typecheck() == 0 {
+ n.SetLeft(typecheck(n.Left(), ctxExpr|ctxAssign))
}
- if !ir.IsBlank(n.Left) {
- checkwidth(n.Left.Type) // ensure width is calculated for backend
+ if !ir.IsBlank(n.Left()) {
+ checkwidth(n.Left().Type()) // ensure width is calculated for backend
}
}
func checkassignto(src *types.Type, dst *ir.Node) {
- if op, why := assignop(src, dst.Type); op == ir.OXXX {
+ if op, why := assignop(src, dst.Type()); op == ir.OXXX {
base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why)
return
}
defer tracePrint("typecheckas2", n)(nil)
}
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
// delicate little dance.
n1 = resolve(n1)
ls[i1] = n1
- if n1.Name == nil || n1.Name.Defn != n || n1.Name.Param.Ntype != nil {
+ if n1.Name() == nil || n1.Name().Defn != n || n1.Name().Param.Ntype != nil {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
}
- cl := n.List.Len()
- cr := n.Rlist.Len()
+ cl := n.List().Len()
+ cr := n.Rlist().Len()
if cl > 1 && cr == 1 {
- n.Rlist.SetFirst(typecheck(n.Rlist.First(), ctxExpr|ctxMultiOK))
+ n.Rlist().SetFirst(typecheck(n.Rlist().First(), ctxExpr|ctxMultiOK))
} else {
- typecheckslice(n.Rlist.Slice(), ctxExpr)
+ typecheckslice(n.Rlist().Slice(), ctxExpr)
}
- checkassignlist(n, n.List)
+ checkassignlist(n, n.List())
var l *ir.Node
var r *ir.Node
if cl == cr {
// easy
- ls := n.List.Slice()
- rs := n.Rlist.Slice()
+ ls := n.List().Slice()
+ rs := n.Rlist().Slice()
for il, nl := range ls {
nr := rs[il]
- if nl.Type != nil && nr.Type != nil {
- rs[il] = assignconv(nr, nl.Type, "assignment")
+ if nl.Type() != nil && nr.Type() != nil {
+ rs[il] = assignconv(nr, nl.Type(), "assignment")
}
- if nl.Name != nil && nl.Name.Defn == n && nl.Name.Param.Ntype == nil {
+ if nl.Name() != nil && nl.Name().Defn == n && nl.Name().Param.Ntype == nil {
rs[il] = defaultlit(rs[il], nil)
- nl.Type = rs[il].Type
+ nl.SetType(rs[il].Type())
}
}
goto out
}
- l = n.List.First()
- r = n.Rlist.First()
+ l = n.List().First()
+ r = n.Rlist().First()
// x,y,z = f()
if cr == 1 {
- if r.Type == nil {
+ if r.Type() == nil {
goto out
}
- switch r.Op {
+ switch r.Op() {
case ir.OCALLMETH, ir.OCALLINTER, ir.OCALLFUNC:
- if !r.Type.IsFuncArgStruct() {
+ if !r.Type().IsFuncArgStruct() {
break
}
- cr = r.Type.NumFields()
+ cr = r.Type().NumFields()
if cr != cl {
goto mismatch
}
- n.Op = ir.OAS2FUNC
- n.Right = r
- n.Rlist.Set(nil)
- for i, l := range n.List.Slice() {
- f := r.Type.Field(i)
- if f.Type != nil && l.Type != nil {
+ n.SetOp(ir.OAS2FUNC)
+ n.SetRight(r)
+ n.PtrRlist().Set(nil)
+ for i, l := range n.List().Slice() {
+ f := r.Type().Field(i)
+ if f.Type != nil && l.Type() != nil {
checkassignto(f.Type, l)
}
- if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
- l.Type = f.Type
+ if l.Name() != nil && l.Name().Defn == n && l.Name().Param.Ntype == nil {
+ l.SetType(f.Type)
}
}
goto out
// x, ok = y
if cl == 2 && cr == 1 {
- if r.Type == nil {
+ if r.Type() == nil {
goto out
}
- switch r.Op {
+ switch r.Op() {
case ir.OINDEXMAP, ir.ORECV, ir.ODOTTYPE:
- switch r.Op {
+ switch r.Op() {
case ir.OINDEXMAP:
- n.Op = ir.OAS2MAPR
+ n.SetOp(ir.OAS2MAPR)
case ir.ORECV:
- n.Op = ir.OAS2RECV
+ n.SetOp(ir.OAS2RECV)
case ir.ODOTTYPE:
- n.Op = ir.OAS2DOTTYPE
- r.Op = ir.ODOTTYPE2
+ n.SetOp(ir.OAS2DOTTYPE)
+ r.SetOp(ir.ODOTTYPE2)
}
- n.Right = r
- n.Rlist.Set(nil)
- if l.Type != nil {
- checkassignto(r.Type, l)
+ n.SetRight(r)
+ n.PtrRlist().Set(nil)
+ if l.Type() != nil {
+ checkassignto(r.Type(), l)
}
- if l.Name != nil && l.Name.Defn == n {
- l.Type = r.Type
+ if l.Name() != nil && l.Name().Defn == n {
+ l.SetType(r.Type())
}
- l := n.List.Second()
- if l.Type != nil && !l.Type.IsBoolean() {
+ l := n.List().Second()
+ if l.Type() != nil && !l.Type().IsBoolean() {
checkassignto(types.Types[types.TBOOL], l)
}
- if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
- l.Type = types.Types[types.TBOOL]
+ if l.Name() != nil && l.Name().Defn == n && l.Name().Param.Ntype == nil {
+ l.SetType(types.Types[types.TBOOL])
}
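// Illustrative sketch: v, ok := m[k] is retagged OAS2MAPR above,
// v, ok := <-ch becomes OAS2RECV, and v, ok := x.(T) becomes OAS2DOTTYPE
// with the assertion marked ODOTTYPE2; ok is typed bool here.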
goto out
}
}
mismatch:
- switch r.Op {
+ switch r.Op() {
default:
base.Errorf("assignment mismatch: %d variables but %d values", cl, cr)
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.Left, cr)
+ base.Errorf("assignment mismatch: %d variables but %v returns %d values", cl, r.Left(), cr)
}
// second half of dance
out:
n.SetTypecheck(1)
- ls = n.List.Slice()
+ ls = n.List().Slice()
for i1, n1 := range ls {
if n1.Typecheck() == 0 {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
defer tracePrint("typecheckfunc", n)(nil)
}
- for _, ln := range n.Func.Dcl {
- if ln.Op == ir.ONAME && (ln.Class() == ir.PPARAM || ln.Class() == ir.PPARAMOUT) {
- ln.Name.Decldepth = 1
+ for _, ln := range n.Func().Dcl {
+ if ln.Op() == ir.ONAME && (ln.Class() == ir.PPARAM || ln.Class() == ir.PPARAMOUT) {
+ ln.Name().Decldepth = 1
}
}
- n.Func.Nname = typecheck(n.Func.Nname, ctxExpr|ctxAssign)
- t := n.Func.Nname.Type
+ n.Func().Nname = typecheck(n.Func().Nname, ctxExpr|ctxAssign)
+ t := n.Func().Nname.Type()
if t == nil {
return
}
- n.Type = t
+ n.SetType(t)
rcvr := t.Recv()
- if rcvr != nil && n.Func.Shortname != nil {
- m := addmethod(n, n.Func.Shortname, t, true, n.Func.Pragma&ir.Nointerface != 0)
+ if rcvr != nil && n.Func().Shortname != nil {
+ m := addmethod(n, n.Func().Shortname, t, true, n.Func().Pragma&ir.Nointerface != 0)
if m == nil {
return
}
- n.Func.Nname.Sym = methodSym(rcvr.Type, n.Func.Shortname)
- declare(n.Func.Nname, ir.PFUNC)
+ n.Func().Nname.SetSym(methodSym(rcvr.Type, n.Func().Shortname))
+ declare(n.Func().Nname, ir.PFUNC)
}
- if base.Ctxt.Flag_dynlink && !inimport && n.Func.Nname != nil {
- makefuncsym(n.Func.Nname.Sym)
+ if base.Ctxt.Flag_dynlink && !inimport && n.Func().Nname != nil {
+ makefuncsym(n.Func().Nname.Sym())
}
}
// The result of stringtoruneslit MUST be assigned back to n, e.g.
// n.Left = stringtoruneslit(n.Left)
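// Illustrative sketch: a constant operand is expanded into an explicit
// rune-slice literal, e.g.
//   []rune("hi")  =>  []rune{0: 'h', 1: 'i'}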
func stringtoruneslit(n *ir.Node) *ir.Node {
- if n.Left.Op != ir.OLITERAL || n.Left.Val().Kind() != constant.String {
+ if n.Left().Op() != ir.OLITERAL || n.Left().Val().Kind() != constant.String {
base.Fatalf("stringtoarraylit %v", n)
}
var l []*ir.Node
i := 0
- for _, r := range n.Left.StringVal() {
+ for _, r := range n.Left().StringVal() {
l = append(l, ir.Nod(ir.OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
i++
}
- nn := ir.Nod(ir.OCOMPLIT, nil, typenod(n.Type))
- nn.List.Set(l)
+ nn := ir.Nod(ir.OCOMPLIT, nil, typenod(n.Type()))
+ nn.PtrList().Set(l)
nn = typecheck(nn, ctxExpr)
return nn
}
func checkMapKeys() {
for _, n := range mapqueue {
- k := n.Type.MapType().Key
+ k := n.Type().MapType().Key
if !k.Broke() && !IsComparable(k) {
- base.ErrorfAt(n.Pos, "invalid map key type %v", k)
+ base.ErrorfAt(n.Pos(), "invalid map key type %v", k)
}
}
mapqueue = nil
// Restore unnecessarily clobbered attributes.
t.Nod = ir.AsTypesNode(n)
- t.Sym = n.Sym
- if n.Name != nil {
- t.Vargen = n.Name.Vargen
+ t.Sym = n.Sym()
+ if n.Name() != nil {
+ t.Vargen = n.Name().Vargen
}
t.Cache = cache
t.SetDeferwidth(false)
}
// Propagate go:notinheap pragma from the Name to the Type.
- if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma()&ir.NotInHeap != 0 {
+ if n.Name() != nil && n.Name().Param != nil && n.Name().Param.Pragma()&ir.NotInHeap != 0 {
t.SetNotInHeap(true)
}
}
n.SetTypecheck(1)
- n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
- t := n.Name.Param.Ntype.Type
+ n.Name().Param.Ntype = typecheck(n.Name().Param.Ntype, ctxType)
+ t := n.Name().Param.Ntype.Type()
if t == nil {
n.SetDiag(true)
- n.Type = nil
- } else if n.Type == nil {
+ n.SetType(nil)
+ } else if n.Type() == nil {
n.SetDiag(true)
} else {
// copy new type and clear fields
// that don't come along.
- setUnderlying(n.Type, t)
+ setUnderlying(n.Type(), t)
}
}
lno := setlineno(n)
- if n.Op == ir.ONONAME {
+ if n.Op() == ir.ONONAME {
if !n.Diag() {
n.SetDiag(true)
// Note: adderrorname looks for this string and
// adds context about the outer expression
- base.ErrorfAt(base.Pos, "undefined: %v", n.Sym)
+ base.ErrorfAt(base.Pos, "undefined: %v", n.Sym())
}
base.Pos = lno
return
fmt.Printf("typecheckdef loop:")
for i := len(typecheckdefstack) - 1; i >= 0; i-- {
n := typecheckdefstack[i]
- fmt.Printf(" %v", n.Sym)
+ fmt.Printf(" %v", n.Sym())
}
fmt.Printf("\n")
base.Fatalf("typecheckdef loop")
n.SetWalkdef(2)
- if n.Type != nil || n.Sym == nil { // builtin or no name
+ if n.Type() != nil || n.Sym() == nil { // builtin or no name
goto ret
}
- switch n.Op {
+ switch n.Op() {
default:
- base.Fatalf("typecheckdef %v", n.Op)
+ base.Fatalf("typecheckdef %v", n.Op())
case ir.OLITERAL:
- if n.Name.Param.Ntype != nil {
- n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
- n.Type = n.Name.Param.Ntype.Type
- n.Name.Param.Ntype = nil
- if n.Type == nil {
+ if n.Name().Param.Ntype != nil {
+ n.Name().Param.Ntype = typecheck(n.Name().Param.Ntype, ctxType)
+ n.SetType(n.Name().Param.Ntype.Type())
+ n.Name().Param.Ntype = nil
+ if n.Type() == nil {
n.SetDiag(true)
goto ret
}
}
- e := n.Name.Defn
- n.Name.Defn = nil
+ e := n.Name().Defn
+ n.Name().Defn = nil
if e == nil {
ir.Dump("typecheckdef nil defn", n)
- base.ErrorfAt(n.Pos, "xxx")
+ base.ErrorfAt(n.Pos(), "xxx")
}
e = typecheck(e, ctxExpr)
- if e.Type == nil {
+ if e.Type() == nil {
goto ret
}
if !isGoConst(e) {
if !e.Diag() {
- if e.Op == ir.ONIL {
- base.ErrorfAt(n.Pos, "const initializer cannot be nil")
+ if e.Op() == ir.ONIL {
+ base.ErrorfAt(n.Pos(), "const initializer cannot be nil")
} else {
- base.ErrorfAt(n.Pos, "const initializer %v is not a constant", e)
+ base.ErrorfAt(n.Pos(), "const initializer %v is not a constant", e)
}
e.SetDiag(true)
}
goto ret
}
- t := n.Type
+ t := n.Type()
if t != nil {
if !ir.OKForConst[t.Etype] {
- base.ErrorfAt(n.Pos, "invalid constant type %v", t)
+ base.ErrorfAt(n.Pos(), "invalid constant type %v", t)
goto ret
}
- if !e.Type.IsUntyped() && !types.Identical(t, e.Type) {
- base.ErrorfAt(n.Pos, "cannot use %L as type %v in const initializer", e, t)
+ if !e.Type().IsUntyped() && !types.Identical(t, e.Type()) {
+ base.ErrorfAt(n.Pos(), "cannot use %L as type %v in const initializer", e, t)
goto ret
}
e = convlit(e, t)
}
- n.Type = e.Type
- if n.Type != nil {
+ n.SetType(e.Type())
+ if n.Type() != nil {
n.SetVal(e.Val())
}
case ir.ONAME:
- if n.Name.Param.Ntype != nil {
- n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, ctxType)
- n.Type = n.Name.Param.Ntype.Type
- if n.Type == nil {
+ if n.Name().Param.Ntype != nil {
+ n.Name().Param.Ntype = typecheck(n.Name().Param.Ntype, ctxType)
+ n.SetType(n.Name().Param.Ntype.Type())
+ if n.Type() == nil {
n.SetDiag(true)
goto ret
}
}
- if n.Type != nil {
+ if n.Type() != nil {
break
}
- if n.Name.Defn == nil {
+ if n.Name().Defn == nil {
if n.SubOp() != 0 { // like OPRINTN
break
}
break
}
- base.Fatalf("var without type, init: %v", n.Sym)
+ base.Fatalf("var without type, init: %v", n.Sym())
}
- if n.Name.Defn.Op == ir.ONAME {
- n.Name.Defn = typecheck(n.Name.Defn, ctxExpr)
- n.Type = n.Name.Defn.Type
+ if n.Name().Defn.Op() == ir.ONAME {
+ n.Name().Defn = typecheck(n.Name().Defn, ctxExpr)
+ n.SetType(n.Name().Defn.Type())
break
}
- n.Name.Defn = typecheck(n.Name.Defn, ctxStmt) // fills in n.Type
+ n.Name().Defn = typecheck(n.Name().Defn, ctxStmt) // fills in n.Type
case ir.OTYPE:
- if p := n.Name.Param; p.Alias() {
+ if p := n.Name().Param; p.Alias() {
// Type alias declaration: Simply use the rhs type - no need
// to create a new type.
// If we have a syntax error, p.Ntype may be nil.
if p.Ntype != nil {
p.Ntype = typecheck(p.Ntype, ctxType)
- n.Type = p.Ntype.Type
- if n.Type == nil {
+ n.SetType(p.Ntype.Type())
+ if n.Type() == nil {
n.SetDiag(true)
goto ret
}
// For package-level type aliases, set n.Sym.Def so we can identify
// it as a type alias during export. See also #31959.
- if n.Name.Curfn == nil {
- n.Sym.Def = ir.AsTypesNode(p.Ntype)
+ if n.Name().Curfn == nil {
+ n.Sym().Def = ir.AsTypesNode(p.Ntype)
}
}
break
defercheckwidth()
n.SetWalkdef(1)
setTypeNode(n, types.New(types.TFORW))
- n.Type.Sym = n.Sym
+ n.Type().Sym = n.Sym()
errorsBefore := base.Errors()
typecheckdeftype(n)
- if n.Type.Etype == types.TFORW && base.Errors() > errorsBefore {
+ if n.Type().Etype == types.TFORW && base.Errors() > errorsBefore {
// Something went wrong during type-checking,
// but it was reported. Silence future errors.
- n.Type.SetBroke(true)
+ n.Type().SetBroke(true)
}
resumecheckwidth()
}
ret:
- if n.Op != ir.OLITERAL && n.Type != nil && n.Type.IsUntyped() {
- base.Fatalf("got %v for %v", n.Type, n)
+ if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().IsUntyped() {
+ base.Fatalf("got %v for %v", n.Type(), n)
}
last := len(typecheckdefstack) - 1
if typecheckdefstack[last] != n {
func checkmake(t *types.Type, arg string, np **ir.Node) bool {
n := *np
- if !n.Type.IsInteger() && n.Type.Etype != types.TIDEAL {
- base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type)
+ if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL {
+ base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type())
return false
}
// Do range checks for constants before defaultlit
// to avoid redundant "constant NNN overflows int" errors.
- if n.Op == ir.OLITERAL {
+ if n.Op() == ir.OLITERAL {
v := toint(n.Val())
if constant.Sign(v) < 0 {
base.Errorf("negative %s argument in make(%v)", arg, t)
return
}
- switch n.Op {
+ switch n.Op() {
case ir.OBREAK:
- if n.Sym == nil {
+ if n.Sym() == nil {
if implicit != nil {
implicit.SetHasBreak(true)
}
} else {
- lab := ir.AsNode(n.Sym.Label)
+ lab := ir.AsNode(n.Sym().Label)
if lab != nil {
lab.SetHasBreak(true)
}
implicit = n
fallthrough
default:
- markbreak(n.Left, implicit)
- markbreak(n.Right, implicit)
- markbreaklist(n.Ninit, implicit)
- markbreaklist(n.Nbody, implicit)
- markbreaklist(n.List, implicit)
- markbreaklist(n.Rlist, implicit)
+ markbreak(n.Left(), implicit)
+ markbreak(n.Right(), implicit)
+ markbreaklist(n.Init(), implicit)
+ markbreaklist(n.Body(), implicit)
+ markbreaklist(n.List(), implicit)
+ markbreaklist(n.Rlist(), implicit)
}
}
if n == nil {
continue
}
- if n.Op == ir.OLABEL && i+1 < len(s) && n.Name.Defn == s[i+1] {
- switch n.Name.Defn.Op {
+ if n.Op() == ir.OLABEL && i+1 < len(s) && n.Name().Defn == s[i+1] {
+ switch n.Name().Defn.Op() {
case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OTYPESW, ir.OSELECT, ir.ORANGE:
- n.Sym.Label = ir.AsTypesNode(n.Name.Defn)
- markbreak(n.Name.Defn, n.Name.Defn)
- n.Sym.Label = nil
+ n.Sym().Label = ir.AsTypesNode(n.Name().Defn)
+ markbreak(n.Name().Defn, n.Name().Defn)
+ n.Sym().Label = nil
i++
continue
}
// isTermNode reports whether the node n, the last one in a
// statement list, is a terminating statement.
func isTermNode(n *ir.Node) bool {
- switch n.Op {
+ switch n.Op() {
// NOTE: OLABEL is treated as a separate statement,
// not a separate prefix, so skipping to the last statement
// in the block handles the labeled statement case by
// skipping over the label. No case OLABEL here.
case ir.OBLOCK:
- return isTermNodes(n.List)
+ return isTermNodes(n.List())
case ir.OGOTO, ir.ORETURN, ir.ORETJMP, ir.OPANIC, ir.OFALL:
return true
case ir.OFOR, ir.OFORUNTIL:
- if n.Left != nil {
+ if n.Left() != nil {
return false
}
if n.HasBreak() {
return false
}
return true
case ir.OIF:
- return isTermNodes(n.Nbody) && isTermNodes(n.Rlist)
+ return isTermNodes(n.Body()) && isTermNodes(n.Rlist())
case ir.OSWITCH, ir.OTYPESW, ir.OSELECT:
if n.HasBreak() {
return false
}
def := false
- for _, n1 := range n.List.Slice() {
- if !isTermNodes(n1.Nbody) {
+ for _, n1 := range n.List().Slice() {
+ if !isTermNodes(n1.Body()) {
return false
}
- if n1.List.Len() == 0 { // default
+ if n1.List().Len() == 0 { // default
def = true
}
}
- if n.Op != ir.OSELECT && !def {
+ if n.Op() != ir.OSELECT && !def {
return false
}
return true
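A sketch of what this classification permits (poll is a hypothetical helper): checkreturn accepts the function below even though its last token is not a return, because a condition-free, break-free for loop is terminating:

	func wait() int {
		for { // no condition, no break: terminating
			if v, ok := poll(); ok {
				return v
			}
		}
	} // no "missing return at end of function" error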
// checkreturn makes sure that fn terminates appropriately.
func checkreturn(fn *ir.Node) {
- if fn.Type.NumResults() != 0 && fn.Nbody.Len() != 0 {
- markbreaklist(fn.Nbody, nil)
- if !isTermNodes(fn.Nbody) {
- base.ErrorfAt(fn.Func.Endlineno, "missing return at end of function")
+ if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 {
+ markbreaklist(fn.Body(), nil)
+ if !isTermNodes(fn.Body()) {
+ base.ErrorfAt(fn.Func().Endlineno, "missing return at end of function")
}
}
}
func deadcode(fn *ir.Node) {
- deadcodeslice(&fn.Nbody)
+ deadcodeslice(fn.PtrBody())
deadcodefn(fn)
}
func deadcodefn(fn *ir.Node) {
- if fn.Nbody.Len() == 0 {
+ if fn.Body().Len() == 0 {
return
}
- for _, n := range fn.Nbody.Slice() {
- if n.Ninit.Len() > 0 {
+ for _, n := range fn.Body().Slice() {
+ if n.Init().Len() > 0 {
return
}
- switch n.Op {
+ switch n.Op() {
case ir.OIF:
- if !ir.IsConst(n.Left, constant.Bool) || n.Nbody.Len() > 0 || n.Rlist.Len() > 0 {
+ if !ir.IsConst(n.Left(), constant.Bool) || n.Body().Len() > 0 || n.Rlist().Len() > 0 {
return
}
case ir.OFOR:
- if !ir.IsConst(n.Left, constant.Bool) || n.Left.BoolVal() {
+ if !ir.IsConst(n.Left(), constant.Bool) || n.Left().BoolVal() {
return
}
default:
}
}
- fn.Nbody.Set([]*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)})
+ fn.PtrBody().Set([]*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)})
}
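Illustrative input for the collapse above: once deadcodeslice has emptied the constant-false branch, the function body is a single constant OIF with no remaining statements, so it is replaced wholesale:

	const trace = false

	func dump() {
		if trace { // branch body already removed by deadcodeslice
			println("state")
		}
	} // deadcodefn sets the body to one OEMPTY node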
func deadcodeslice(nn *ir.Nodes) {
var lastLabel = -1
for i, n := range nn.Slice() {
- if n != nil && n.Op == ir.OLABEL {
+ if n != nil && n.Op() == ir.OLABEL {
lastLabel = i
}
}
if n == nil {
continue
}
- if n.Op == ir.OIF {
- n.Left = deadcodeexpr(n.Left)
- if ir.IsConst(n.Left, constant.Bool) {
+ if n.Op() == ir.OIF {
+ n.SetLeft(deadcodeexpr(n.Left()))
+ if ir.IsConst(n.Left(), constant.Bool) {
var body ir.Nodes
- if n.Left.BoolVal() {
- n.Rlist = ir.Nodes{}
- body = n.Nbody
+ if n.Left().BoolVal() {
+ n.SetRlist(ir.Nodes{})
+ body = n.Body()
} else {
- n.Nbody = ir.Nodes{}
- body = n.Rlist
+ n.SetBody(ir.Nodes{})
+ body = n.Rlist()
}
// If "then" or "else" branch ends with panic or return statement,
// it is safe to remove all statements after this node.
// We must be careful not to deadcode-remove labels, as they
// might be the target of a goto. See issue 28616.
if body := body.Slice(); len(body) != 0 {
- switch body[(len(body) - 1)].Op {
+ switch body[(len(body) - 1)].Op() {
case ir.ORETURN, ir.ORETJMP, ir.OPANIC:
if i > lastLabel {
cut = true
}
}
- deadcodeslice(&n.Ninit)
- deadcodeslice(&n.Nbody)
- deadcodeslice(&n.List)
- deadcodeslice(&n.Rlist)
+ deadcodeslice(n.PtrInit())
+ deadcodeslice(n.PtrBody())
+ deadcodeslice(n.PtrList())
+ deadcodeslice(n.PtrRlist())
if cut {
nn.Set(nn.Slice()[:i+1])
break
// Perform dead-code elimination on short-circuited boolean
// expressions involving constants with the intent of
// producing a constant 'if' condition.
- switch n.Op {
+ switch n.Op() {
case ir.OANDAND:
- n.Left = deadcodeexpr(n.Left)
- n.Right = deadcodeexpr(n.Right)
- if ir.IsConst(n.Left, constant.Bool) {
- if n.Left.BoolVal() {
- return n.Right // true && x => x
+ n.SetLeft(deadcodeexpr(n.Left()))
+ n.SetRight(deadcodeexpr(n.Right()))
+ if ir.IsConst(n.Left(), constant.Bool) {
+ if n.Left().BoolVal() {
+ return n.Right() // true && x => x
} else {
- return n.Left // false && x => false
+ return n.Left() // false && x => false
}
}
case ir.OOROR:
- n.Left = deadcodeexpr(n.Left)
- n.Right = deadcodeexpr(n.Right)
- if ir.IsConst(n.Left, constant.Bool) {
- if n.Left.BoolVal() {
- return n.Left // true || x => true
+ n.SetLeft(deadcodeexpr(n.Left()))
+ n.SetRight(deadcodeexpr(n.Right()))
+ if ir.IsConst(n.Left(), constant.Bool) {
+ if n.Left().BoolVal() {
+ return n.Left() // true || x => true
} else {
- return n.Right // false || x => x
+ return n.Right() // false || x => x
}
}
}
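These are the standard short-circuit identities; in a sketch (expensiveCheck is a stand-in name), a constant-false guard folds the whole condition, which then feeds the constant-OIF handling in deadcodeslice above:

	const debug = false

	func check() {
		if debug && expensiveCheck() { // false && x => false: branch is dead
			println("violation")
		}
	}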
// setTypeNode sets n to an OTYPE node representing t.
func setTypeNode(n *ir.Node, t *types.Type) {
- n.Op = ir.OTYPE
- n.Type = t
- n.Type.Nod = ir.AsTypesNode(n)
+ n.SetOp(ir.OTYPE)
+ n.SetType(t)
+ n.Type().Nod = ir.AsTypesNode(n)
}
// getIotaValue returns the current value for "iota",
// or -1 if not within a ConstSpec.
func getIotaValue() int64 {
if i := len(typecheckdefstack); i > 0 {
- if x := typecheckdefstack[i-1]; x.Op == ir.OLITERAL {
+ if x := typecheckdefstack[i-1]; x.Op() == ir.OLITERAL {
return x.Iota()
}
}
// TODO(mdempsky): Standardize on either ODCLFUNC or ONAME for
// Curfn, rather than mixing them.
- if fn.Op == ir.ODCLFUNC {
- fn = fn.Func.Nname
+ if fn.Op() == ir.ODCLFUNC {
+ fn = fn.Func().Nname
}
return fnpkg(fn)
// MethodFunc is like MethodName, but returns the types.Field instead.
func methodExprFunc(n *ir.Node) *types.Field {
- switch n.Op {
+ switch n.Op() {
case ir.ODOTMETH, ir.OMETHEXPR:
return n.Opt().(*types.Field)
case ir.OCALLPART:
return callpartMethod(n)
}
- base.Fatalf("unexpected node: %v (%v)", n, n.Op)
+ base.Fatalf("unexpected node: %v (%v)", n, n.Op())
panic("unreachable")
}
types.Types[etype] = t
}
s2.Def = ir.AsTypesNode(typenod(t))
- ir.AsNode(s2.Def).Name = new(ir.Name)
+ ir.AsNode(s2.Def).SetName(new(ir.Name))
}
for _, s := range &builtinFuncs {
s := ir.BuiltinPkg.Lookup("true")
s.Def = ir.AsTypesNode(nodbool(true))
- ir.AsNode(s.Def).Sym = lookup("true")
- ir.AsNode(s.Def).Name = new(ir.Name)
- ir.AsNode(s.Def).Type = types.UntypedBool
+ ir.AsNode(s.Def).SetSym(lookup("true"))
+ ir.AsNode(s.Def).SetName(new(ir.Name))
+ ir.AsNode(s.Def).SetType(types.UntypedBool)
s = ir.BuiltinPkg.Lookup("false")
s.Def = ir.AsTypesNode(nodbool(false))
- ir.AsNode(s.Def).Sym = lookup("false")
- ir.AsNode(s.Def).Name = new(ir.Name)
- ir.AsNode(s.Def).Type = types.UntypedBool
+ ir.AsNode(s.Def).SetSym(lookup("false"))
+ ir.AsNode(s.Def).SetName(new(ir.Name))
+ ir.AsNode(s.Def).SetType(types.UntypedBool)
s = lookup("_")
s.Block = -100
s.Def = ir.AsTypesNode(NewName(s))
types.Types[types.TBLANK] = types.New(types.TBLANK)
- ir.AsNode(s.Def).Type = types.Types[types.TBLANK]
+ ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
ir.BlankNode = ir.AsNode(s.Def)
s = ir.BuiltinPkg.Lookup("_")
s.Block = -100
s.Def = ir.AsTypesNode(NewName(s))
types.Types[types.TBLANK] = types.New(types.TBLANK)
- ir.AsNode(s.Def).Type = types.Types[types.TBLANK]
+ ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
types.Types[types.TNIL] = types.New(types.TNIL)
s = ir.BuiltinPkg.Lookup("nil")
s.Def = ir.AsTypesNode(nodnil())
- ir.AsNode(s.Def).Sym = s
- ir.AsNode(s.Def).Name = new(ir.Name)
+ ir.AsNode(s.Def).SetSym(s)
+ ir.AsNode(s.Def).SetName(new(ir.Name))
s = ir.BuiltinPkg.Lookup("iota")
s.Def = ir.AsTypesNode(ir.Nod(ir.OIOTA, nil, nil))
- ir.AsNode(s.Def).Sym = s
- ir.AsNode(s.Def).Name = new(ir.Name)
+ ir.AsNode(s.Def).SetSym(s)
+ ir.AsNode(s.Def).SetName(new(ir.Name))
}
func typeinit() {
types.Types[types.TUNSAFEPTR] = t
t.Sym = unsafepkg.Lookup("Pointer")
t.Sym.Def = ir.AsTypesNode(typenod(t))
- ir.AsNode(t.Sym.Def).Name = new(ir.Name)
+ ir.AsNode(t.Sym.Def).SetName(new(ir.Name))
dowidth(types.Types[types.TUNSAFEPTR])
for et := types.TINT8; et <= types.TUINT64; et++ {
types.Bytetype = types.New(types.TUINT8)
types.Bytetype.Sym = s
s.Def = ir.AsTypesNode(typenod(types.Bytetype))
- ir.AsNode(s.Def).Name = new(ir.Name)
+ ir.AsNode(s.Def).SetName(new(ir.Name))
dowidth(types.Bytetype)
// rune alias
types.Runetype = types.New(types.TINT32)
types.Runetype.Sym = s
s.Def = ir.AsTypesNode(typenod(types.Runetype))
- ir.AsNode(s.Def).Name = new(ir.Name)
+ ir.AsNode(s.Def).SetName(new(ir.Name))
dowidth(types.Runetype)
// backend-dependent builtin types (e.g. int).
t.Sym = s1
types.Types[s.etype] = t
s1.Def = ir.AsTypesNode(typenod(t))
- ir.AsNode(s1.Def).Name = new(ir.Name)
+ ir.AsNode(s1.Def).SetName(new(ir.Name))
s1.Origpkg = ir.BuiltinPkg
dowidth(t)
}
nodfp = NewName(lookup(".fp"))
- nodfp.Type = types.Types[types.TINT32]
+ nodfp.SetType(types.Types[types.TINT32])
nodfp.SetClass(ir.PPARAM)
- nodfp.Name.SetUsed(true)
+ nodfp.Name().SetUsed(true)
}
// evalunsafe evaluates a package unsafe operation and returns the result.
func evalunsafe(n *ir.Node) int64 {
- switch n.Op {
+ switch n.Op() {
case ir.OALIGNOF, ir.OSIZEOF:
- n.Left = typecheck(n.Left, ctxExpr)
- n.Left = defaultlit(n.Left, nil)
- tr := n.Left.Type
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ n.SetLeft(defaultlit(n.Left(), nil))
+ tr := n.Left().Type()
if tr == nil {
return 0
}
dowidth(tr)
- if n.Op == ir.OALIGNOF {
+ if n.Op() == ir.OALIGNOF {
return int64(tr.Align)
}
return tr.Width
case ir.OOFFSETOF:
// must be a selector.
- if n.Left.Op != ir.OXDOT {
+ if n.Left().Op() != ir.OXDOT {
base.Errorf("invalid expression %v", n)
return 0
}
// Remember base of selector to find it back after dot insertion.
// Since r->left may be mutated by typechecking, check it explicitly
// first to track it correctly.
- n.Left.Left = typecheck(n.Left.Left, ctxExpr)
- sbase := n.Left.Left
+ n.Left().SetLeft(typecheck(n.Left().Left(), ctxExpr))
+ sbase := n.Left().Left()
- n.Left = typecheck(n.Left, ctxExpr)
- if n.Left.Type == nil {
+ n.SetLeft(typecheck(n.Left(), ctxExpr))
+ if n.Left().Type() == nil {
return 0
}
- switch n.Left.Op {
+ switch n.Left().Op() {
case ir.ODOT, ir.ODOTPTR:
break
case ir.OCALLPART:
// Sum offsets for dots until we reach sbase.
var v int64
- for r := n.Left; r != sbase; r = r.Left {
- switch r.Op {
+ for r := n.Left(); r != sbase; r = r.Left() {
+ switch r.Op() {
case ir.ODOTPTR:
// For Offsetof(s.f), s may itself be a pointer,
// but accessing f must not otherwise involve
// indirection via embedded pointer types.
- if r.Left != sbase {
- base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.Left)
+ if r.Left() != sbase {
+ base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.Left())
return 0
}
fallthrough
case ir.ODOT:
- v += r.Xoffset
+ v += r.Offset()
default:
- ir.Dump("unsafenmagic", n.Left)
- base.Fatalf("impossible %#v node after dot insertion", r.Op)
+ ir.Dump("unsafenmagic", n.Left())
+ base.Fatalf("impossible %#v node after dot insertion", r.Op())
}
}
return v
}
- base.Fatalf("unexpected op %v", n.Op)
+ base.Fatalf("unexpected op %v", n.Op())
return 0
}
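The embedded-pointer restriction in concrete form (a sketch, assuming the unsafe import): offsets may be summed across value embeddings only, since a pointer embedding would require an indirection at run time:

	type T struct{ x int }
	type U struct{ T }  // embedded by value
	type V struct{ *T } // embedded by pointer

	var u U
	const ok = unsafe.Offsetof(u.x) // ODOT offsets summed down to sbase

	// For v of type V, unsafe.Offsetof(v.x) would be rejected:
	// "selector implies indirection of embedded v.T"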
errorsBefore := base.Errors()
if base.Flag.W != 0 {
- s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym)
- ir.DumpList(s, Curfn.Nbody)
+ s := fmt.Sprintf("\nbefore walk %v", Curfn.Func().Nname.Sym())
+ ir.DumpList(s, Curfn.Body())
}
lno := base.Pos
// Final typecheck for any unused variables.
- for i, ln := range fn.Func.Dcl {
- if ln.Op == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) {
+ for i, ln := range fn.Func().Dcl {
+ if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) {
ln = typecheck(ln, ctxExpr|ctxAssign)
- fn.Func.Dcl[i] = ln
+ fn.Func().Dcl[i] = ln
}
}
// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
- for _, ln := range fn.Func.Dcl {
- if ln.Op == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == ir.OTYPESW && ln.Name.Used() {
- ln.Name.Defn.Left.Name.SetUsed(true)
+ for _, ln := range fn.Func().Dcl {
+ if ln.Op() == ir.ONAME && (ln.Class() == ir.PAUTO || ln.Class() == ir.PAUTOHEAP) && ln.Name().Defn != nil && ln.Name().Defn.Op() == ir.OTYPESW && ln.Name().Used() {
+ ln.Name().Defn.Left().Name().SetUsed(true)
}
}
- for _, ln := range fn.Func.Dcl {
- if ln.Op != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() {
+ for _, ln := range fn.Func().Dcl {
+ if ln.Op() != ir.ONAME || (ln.Class() != ir.PAUTO && ln.Class() != ir.PAUTOHEAP) || ln.Sym().Name[0] == '&' || ln.Name().Used() {
continue
}
- if defn := ln.Name.Defn; defn != nil && defn.Op == ir.OTYPESW {
- if defn.Left.Name.Used() {
+ if defn := ln.Name().Defn; defn != nil && defn.Op() == ir.OTYPESW {
+ if defn.Left().Name().Used() {
continue
}
- base.ErrorfAt(defn.Left.Pos, "%v declared but not used", ln.Sym)
- defn.Left.Name.SetUsed(true) // suppress repeats
+ base.ErrorfAt(defn.Left().Pos(), "%v declared but not used", ln.Sym())
+ defn.Left().Name().SetUsed(true) // suppress repeats
} else {
- base.ErrorfAt(ln.Pos, "%v declared but not used", ln.Sym)
+ base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym())
}
}
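The typeswitch propagation matters because each case clause gets its own copy of the declared variable; using it in any one clause marks the OTYPESW declaration used (sketch):

	func classify(v interface{}) string {
		switch x := v.(type) { // x is the OTYPESW variable
		case int:
			_ = x // use in one clause marks the declaration used
			return "int"
		case string:
			return "string" // x unused here: still no "declared but not used"
		}
		return "other"
	}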
if base.Errors() > errorsBefore {
return
}
- walkstmtlist(Curfn.Nbody.Slice())
+ walkstmtlist(Curfn.Body().Slice())
if base.Flag.W != 0 {
- s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
- ir.DumpList(s, Curfn.Nbody)
+ s := fmt.Sprintf("after walk %v", Curfn.Func().Nname.Sym())
+ ir.DumpList(s, Curfn.Body())
}
zeroResults()
heapmoves()
- if base.Flag.W != 0 && Curfn.Func.Enter.Len() > 0 {
- s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
- ir.DumpList(s, Curfn.Func.Enter)
+ if base.Flag.W != 0 && Curfn.Func().Enter.Len() > 0 {
+ s := fmt.Sprintf("enter %v", Curfn.Func().Nname.Sym())
+ ir.DumpList(s, Curfn.Func().Enter)
}
}
}
func paramoutheap(fn *ir.Node) bool {
- for _, ln := range fn.Func.Dcl {
+ for _, ln := range fn.Func().Dcl {
switch ln.Class() {
case ir.PPARAMOUT:
- if isParamStackCopy(ln) || ln.Name.Addrtaken() {
+ if isParamStackCopy(ln) || ln.Name().Addrtaken() {
return true
}
setlineno(n)
- walkstmtlist(n.Ninit.Slice())
+ walkstmtlist(n.Init().Slice())
- switch n.Op {
+ switch n.Op() {
default:
- if n.Op == ir.ONAME {
- base.Errorf("%v is not a top level statement", n.Sym)
+ if n.Op() == ir.ONAME {
+ base.Errorf("%v is not a top level statement", n.Sym())
} else {
- base.Errorf("%v is not a top level statement", n.Op)
+ base.Errorf("%v is not a top level statement", n.Op())
}
ir.Dump("nottop", n)
if n.Typecheck() == 0 {
base.Fatalf("missing typecheck: %+v", n)
}
- wascopy := n.Op == ir.OCOPY
- init := n.Ninit
- n.Ninit.Set(nil)
+ wascopy := n.Op() == ir.OCOPY
+ init := n.Init()
+ n.PtrInit().Set(nil)
n = walkexpr(n, &init)
n = addinit(n, init.Slice())
- if wascopy && n.Op == ir.OCONVNOP {
- n.Op = ir.OEMPTY // don't leave plain values as statements.
+ if wascopy && n.Op() == ir.OCONVNOP {
+ n.SetOp(ir.OEMPTY) // don't leave plain values as statements.
}
// special case for a receive where we throw away
// the received value
if n.Typecheck() == 0 {
base.Fatalf("missing typecheck: %+v", n)
}
- init := n.Ninit
- n.Ninit.Set(nil)
+ init := n.Init()
+ n.PtrInit().Set(nil)
- n.Left = walkexpr(n.Left, &init)
- n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, n.Left, nodnil())
+ n.SetLeft(walkexpr(n.Left(), &init))
+ n = mkcall1(chanfn("chanrecv1", 2, n.Left().Type()), nil, &init, n.Left(), nodnil())
n = walkexpr(n, &init)
n = addinit(n, init.Slice())
break
case ir.ODCL:
- v := n.Left
+ v := n.Left()
if v.Class() == ir.PAUTOHEAP {
if base.Flag.CompilingRuntime {
base.Errorf("%v escapes to heap, not allowed in runtime", v)
}
if prealloc[v] == nil {
- prealloc[v] = callnew(v.Type)
+ prealloc[v] = callnew(v.Type())
}
- nn := ir.Nod(ir.OAS, v.Name.Param.Heapaddr, prealloc[v])
+ nn := ir.Nod(ir.OAS, v.Name().Param.Heapaddr, prealloc[v])
nn.SetColas(true)
nn = typecheck(nn, ctxStmt)
return walkstmt(nn)
}
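A minimal example of the declaration this rewrites: for a stack variable promoted to the heap, storage is allocated up front and later uses of the name go through its Heapaddr:

	func leak() *int {
		v := 0    // escapes: class PAUTOHEAP
		return &v // walk emits v.Heapaddr = new(int); uses of v become *Heapaddr
	}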
case ir.OBLOCK:
- walkstmtlist(n.List.Slice())
+ walkstmtlist(n.List().Slice())
case ir.OCASE:
base.Errorf("case statement out of place")
case ir.ODEFER:
- Curfn.Func.SetHasDefer(true)
- Curfn.Func.NumDefers++
- if Curfn.Func.NumDefers > maxOpenDefers {
+ Curfn.Func().SetHasDefer(true)
+ Curfn.Func().NumDefers++
+ if Curfn.Func().NumDefers > maxOpenDefers {
// Don't allow open-coded defers if there are more than
// 8 defers in the function, since we use a single
// byte to record active defers.
- Curfn.Func.SetOpenCodedDeferDisallowed(true)
+ Curfn.Func().SetOpenCodedDeferDisallowed(true)
}
- if n.Esc != EscNever {
+ if n.Esc() != EscNever {
// If n.Esc is not EscNever, then this defer occurs in a loop,
// so open-coded defers cannot be used in this function.
- Curfn.Func.SetOpenCodedDeferDisallowed(true)
+ Curfn.Func().SetOpenCodedDeferDisallowed(true)
}
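A sketch of the two disqualifiers (assuming the os import): more than 8 defers exhausts the bits of the single active-defers byte, and a defer reachable more than once per call, as below, has Esc != EscNever:

	func closeAll(files []*os.File) {
		for _, f := range files {
			defer f.Close() // defer in a loop: open-coded defers disallowed
		}
	}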
fallthrough
case ir.OGO:
- switch n.Left.Op {
+ switch n.Left().Op() {
case ir.OPRINT, ir.OPRINTN:
- n.Left = wrapCall(n.Left, &n.Ninit)
+ n.SetLeft(wrapCall(n.Left(), n.PtrInit()))
case ir.ODELETE:
- if mapfast(n.Left.List.First().Type) == mapslow {
- n.Left = wrapCall(n.Left, &n.Ninit)
+ if mapfast(n.Left().List().First().Type()) == mapslow {
+ n.SetLeft(wrapCall(n.Left(), n.PtrInit()))
} else {
- n.Left = walkexpr(n.Left, &n.Ninit)
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
}
case ir.OCOPY:
- n.Left = copyany(n.Left, &n.Ninit, true)
+ n.SetLeft(copyany(n.Left(), n.PtrInit(), true))
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- if n.Left.Nbody.Len() > 0 {
- n.Left = wrapCall(n.Left, &n.Ninit)
+ if n.Left().Body().Len() > 0 {
+ n.SetLeft(wrapCall(n.Left(), n.PtrInit()))
} else {
- n.Left = walkexpr(n.Left, &n.Ninit)
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
}
default:
- n.Left = walkexpr(n.Left, &n.Ninit)
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
}
case ir.OFOR, ir.OFORUNTIL:
- if n.Left != nil {
- walkstmtlist(n.Left.Ninit.Slice())
- init := n.Left.Ninit
- n.Left.Ninit.Set(nil)
- n.Left = walkexpr(n.Left, &init)
- n.Left = addinit(n.Left, init.Slice())
+ if n.Left() != nil {
+ walkstmtlist(n.Left().Init().Slice())
+ init := n.Left().Init()
+ n.Left().PtrInit().Set(nil)
+ n.SetLeft(walkexpr(n.Left(), &init))
+ n.SetLeft(addinit(n.Left(), init.Slice()))
}
- n.Right = walkstmt(n.Right)
- if n.Op == ir.OFORUNTIL {
- walkstmtlist(n.List.Slice())
+ n.SetRight(walkstmt(n.Right()))
+ if n.Op() == ir.OFORUNTIL {
+ walkstmtlist(n.List().Slice())
}
- walkstmtlist(n.Nbody.Slice())
+ walkstmtlist(n.Body().Slice())
case ir.OIF:
- n.Left = walkexpr(n.Left, &n.Ninit)
- walkstmtlist(n.Nbody.Slice())
- walkstmtlist(n.Rlist.Slice())
+ n.SetLeft(walkexpr(n.Left(), n.PtrInit()))
+ walkstmtlist(n.Body().Slice())
+ walkstmtlist(n.Rlist().Slice())
case ir.ORETURN:
- Curfn.Func.NumReturns++
- if n.List.Len() == 0 {
+ Curfn.Func().NumReturns++
+ if n.List().Len() == 0 {
break
}
- if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) {
+ if (Curfn.Type().FuncType().Outnamed && n.List().Len() > 1) || paramoutheap(Curfn) {
// assign to the function out parameters,
// so that reorder3 can fix up conflicts
var rl []*ir.Node
- for _, ln := range Curfn.Func.Dcl {
+ for _, ln := range Curfn.Func().Dcl {
cl := ln.Class()
if cl == ir.PAUTO || cl == ir.PAUTOHEAP {
break
}
if cl == ir.PPARAMOUT {
if isParamStackCopy(ln) {
- ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name.Param.Heapaddr, nil), ctxExpr), nil)
+ ln = walkexpr(typecheck(ir.Nod(ir.ODEREF, ln.Name().Param.Heapaddr, nil), ctxExpr), nil)
}
rl = append(rl, ln)
}
}
- if got, want := n.List.Len(), len(rl); got != want {
+ if got, want := n.List().Len(), len(rl); got != want {
// order should have rewritten multi-value function calls
// with explicit OAS2FUNC nodes.
base.Fatalf("expected %v return arguments, have %v", want, got)
}
// move function calls out, to make reorder3's job easier.
- walkexprlistsafe(n.List.Slice(), &n.Ninit)
+ walkexprlistsafe(n.List().Slice(), n.PtrInit())
- ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit)
- n.List.Set(reorder3(ll))
+ ll := ascompatee(n.Op(), rl, n.List().Slice(), n.PtrInit())
+ n.PtrList().Set(reorder3(ll))
break
}
- walkexprlist(n.List.Slice(), &n.Ninit)
+ walkexprlist(n.List().Slice(), n.PtrInit())
// For each return parameter (lhs), assign the corresponding result (rhs).
- lhs := Curfn.Type.Results()
- rhs := n.List.Slice()
+ lhs := Curfn.Type().Results()
+ rhs := n.List().Slice()
res := make([]*ir.Node, lhs.NumFields())
for i, nl := range lhs.FieldSlice() {
nname := ir.AsNode(nl.Nname)
if isParamHeapCopy(nname) {
- nname = nname.Name.Param.Stackcopy
+ nname = nname.Name().Param.Stackcopy
}
a := ir.Nod(ir.OAS, nname, rhs[i])
- res[i] = convas(a, &n.Ninit)
+ res[i] = convas(a, n.PtrInit())
}
- n.List.Set(res)
+ n.PtrList().Set(res)
case ir.ORETJMP:
break
n = walkrange(n)
}
- if n.Op == ir.ONAME {
+ if n.Op() == ir.ONAME {
base.Fatalf("walkstmt ended up with name: %+v", n)
}
return n
}
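Worked example for the ORETURN rewrite (a sketch): with named, address-taken results, the return values are first assigned to the out parameters, and reorder3 untangles the a/b conflict:

	func swap() (a, b int) {
		defer func() { _ = &a }() // a is address-taken: paramoutheap is true
		a, b = 1, 2
		return b, a // walked as the assignment list a, b = b, a, then a bare return
	}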
// Eagerly checkwidth all expressions for the back end.
- if n.Type != nil && !n.Type.WidthCalculated() {
- switch n.Type.Etype {
+ if n.Type() != nil && !n.Type().WidthCalculated() {
+ switch n.Type().Etype {
case types.TBLANK, types.TNIL, types.TIDEAL:
default:
- checkwidth(n.Type)
+ checkwidth(n.Type())
}
}
- if init == &n.Ninit {
+ if init == n.PtrInit() {
// not okay to use n->ninit when walking n,
// because we might replace n with some other node
// and would lose the init list.
base.Fatalf("walkexpr init == &n->ninit")
}
- if n.Ninit.Len() != 0 {
- walkstmtlist(n.Ninit.Slice())
- init.AppendNodes(&n.Ninit)
+ if n.Init().Len() != 0 {
+ walkstmtlist(n.Init().Slice())
+ init.AppendNodes(n.PtrInit())
}
lno := setlineno(n)
base.Fatalf("missed typecheck: %+v", n)
}
- if n.Type.IsUntyped() {
+ if n.Type().IsUntyped() {
base.Fatalf("expression has untyped type: %+v", n)
}
- if n.Op == ir.ONAME && n.Class() == ir.PAUTOHEAP {
- nn := ir.Nod(ir.ODEREF, n.Name.Param.Heapaddr, nil)
+ if n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP {
+ nn := ir.Nod(ir.ODEREF, n.Name().Param.Heapaddr, nil)
nn = typecheck(nn, ctxExpr)
nn = walkexpr(nn, init)
- nn.Left.MarkNonNil()
+ nn.Left().MarkNonNil()
return nn
}
opswitch:
- switch n.Op {
+ switch n.Op() {
default:
ir.Dump("walk", n)
base.Fatalf("walkexpr: switch 1 unknown op %+S", n)
case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.ODOTMETH, ir.ODOTINTER,
ir.ODEREF, ir.OSPTR, ir.OITAB, ir.OIDATA, ir.OADDR:
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH:
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
case ir.ODOT, ir.ODOTPTR:
usefield(n)
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
case ir.ODOTTYPE, ir.ODOTTYPE2:
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
// Set up interface type addresses for back end.
- n.Right = typename(n.Type)
- if n.Op == ir.ODOTTYPE {
- n.Right.Right = typename(n.Left.Type)
+ n.SetRight(typename(n.Type()))
+ if n.Op() == ir.ODOTTYPE {
+ n.Right().SetRight(typename(n.Left().Type()))
}
- if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() {
- n.List.Set1(itabname(n.Type, n.Left.Type))
+ if !n.Type().IsInterface() && !n.Left().Type().IsEmptyInterface() {
+ n.PtrList().Set1(itabname(n.Type(), n.Left().Type()))
}
case ir.OLEN, ir.OCAP:
if isRuneCount(n) {
// Replace len([]rune(string)) with runtime.countrunes(string).
- n = mkcall("countrunes", n.Type, init, conv(n.Left.Left, types.Types[types.TSTRING]))
+ n = mkcall("countrunes", n.Type(), init, conv(n.Left().Left(), types.Types[types.TSTRING]))
break
}
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
// replace len(*[10]int) with 10.
// delayed until now to preserve side effects.
- t := n.Left.Type
+ t := n.Left().Type()
if t.IsPtr() {
t = t.Elem()
}
if t.IsArray() {
- safeexpr(n.Left, init)
+ safeexpr(n.Left(), init)
n = origIntConst(n, t.NumElem())
n.SetTypecheck(1)
}
case ir.OCOMPLEX:
// Use results from call expression as arguments for complex.
- if n.Left == nil && n.Right == nil {
- n.Left = n.List.First()
- n.Right = n.List.Second()
+ if n.Left() == nil && n.Right() == nil {
+ n.SetLeft(n.List().First())
+ n.SetRight(n.List().Second())
}
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
n = walkcompare(n, init)
case ir.OANDAND, ir.OOROR:
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
// cannot put side effects from n.Right on init,
// because they cannot run before n.Left is checked.
// save elsewhere and store on the eventual n.Right.
var ll ir.Nodes
- n.Right = walkexpr(n.Right, &ll)
- n.Right = addinit(n.Right, ll.Slice())
+ n.SetRight(walkexpr(n.Right(), &ll))
+ n.SetRight(addinit(n.Right(), ll.Slice()))
case ir.OPRINT, ir.OPRINTN:
n = walkprint(n, init)
case ir.OPANIC:
- n = mkcall("gopanic", nil, init, n.Left)
+ n = mkcall("gopanic", nil, init, n.Left())
case ir.ORECOVER:
- n = mkcall("gorecover", n.Type, init, ir.Nod(ir.OADDR, nodfp, nil))
+ n = mkcall("gorecover", n.Type(), init, ir.Nod(ir.OADDR, nodfp, nil))
case ir.OCLOSUREVAR, ir.OCFUNC:
case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH:
- if n.Op == ir.OCALLINTER {
+ if n.Op() == ir.OCALLINTER {
usemethod(n)
markUsedIfaceMethod(n)
}
- if n.Op == ir.OCALLFUNC && n.Left.Op == ir.OCLOSURE {
+ if n.Op() == ir.OCALLFUNC && n.Left().Op() == ir.OCLOSURE {
// Transform direct call of a closure to call of a normal function.
// transformclosure already did all preparation work.
// Prepend captured variables to argument list.
- n.List.Prepend(n.Left.Func.ClosureEnter.Slice()...)
- n.Left.Func.ClosureEnter.Set(nil)
+ n.PtrList().Prepend(n.Left().Func().ClosureEnter.Slice()...)
+ n.Left().Func().ClosureEnter.Set(nil)
// Replace OCLOSURE with ONAME/PFUNC.
- n.Left = n.Left.Func.Nname
+ n.SetLeft(n.Left().Func().Nname)
// Update type of OCALLFUNC node.
// Output arguments had not changed, but their offsets could.
- if n.Left.Type.NumResults() == 1 {
- n.Type = n.Left.Type.Results().Field(0).Type
+ if n.Left().Type().NumResults() == 1 {
+ n.SetType(n.Left().Type().Results().Field(0).Type)
} else {
- n.Type = n.Left.Type.Results()
+ n.SetType(n.Left().Type().Results())
}
}
walkCall(n, init)
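The closure rewrite on a concrete (assumed) input: a directly called closure turns into a plain call of the lifted function, with the captured variables prepended to the argument list:

	func call() int {
		y := 10
		return func(x int) int { return x + y }(3)
		// roughly: lifted(y, 3), where lifted is the hypothetical
		// ONAME/PFUNC produced earlier by transformclosure
	}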
case ir.OAS, ir.OASOP:
- init.AppendNodes(&n.Ninit)
+ init.AppendNodes(n.PtrInit())
// Recognize m[k] = append(m[k], ...) so we can reuse
// the mapassign call.
- mapAppend := n.Left.Op == ir.OINDEXMAP && n.Right.Op == ir.OAPPEND
- if mapAppend && !samesafeexpr(n.Left, n.Right.List.First()) {
- base.Fatalf("not same expressions: %v != %v", n.Left, n.Right.List.First())
+ mapAppend := n.Left().Op() == ir.OINDEXMAP && n.Right().Op() == ir.OAPPEND
+ if mapAppend && !samesafeexpr(n.Left(), n.Right().List().First()) {
+ base.Fatalf("not same expressions: %v != %v", n.Left(), n.Right().List().First())
}
- n.Left = walkexpr(n.Left, init)
- n.Left = safeexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetLeft(safeexpr(n.Left(), init))
if mapAppend {
- n.Right.List.SetFirst(n.Left)
+ n.Right().List().SetFirst(n.Left())
}
- if n.Op == ir.OASOP {
+ if n.Op() == ir.OASOP {
// Rewrite x op= y into x = x op y.
- n.Right = ir.Nod(n.SubOp(), n.Left, n.Right)
- n.Right = typecheck(n.Right, ctxExpr)
+ n.SetRight(ir.Nod(n.SubOp(), n.Left(), n.Right()))
+ n.SetRight(typecheck(n.Right(), ctxExpr))
- n.Op = ir.OAS
+ n.SetOp(ir.OAS)
n.ResetAux()
}
break
}
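The OASOP reduction in miniature: compound assignment becomes plain assignment before the map and append special cases run, with the left side walked and made safe exactly once:

	func bump(m map[string]int, f func() int) {
		m["k"] += f() // rewritten to m["k"] = m["k"] + f(), an ordinary OAS
	}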
- if n.Right == nil {
+ if n.Right() == nil {
// TODO(austin): Check all "implicit zeroing"
break
}
- if !instrumenting && isZero(n.Right) {
+ if !instrumenting && isZero(n.Right()) {
break
}
- switch n.Right.Op {
+ switch n.Right().Op() {
default:
- n.Right = walkexpr(n.Right, init)
+ n.SetRight(walkexpr(n.Right(), init))
case ir.ORECV:
// x = <-c; n.Left is x, n.Right.Left is c.
// order.stmt made sure x is addressable.
- n.Right.Left = walkexpr(n.Right.Left, init)
+ n.Right().SetLeft(walkexpr(n.Right().Left(), init))
- n1 := ir.Nod(ir.OADDR, n.Left, nil)
- r := n.Right.Left // the channel
- n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1)
+ n1 := ir.Nod(ir.OADDR, n.Left(), nil)
+ r := n.Right().Left() // the channel
+ n = mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1)
n = walkexpr(n, init)
break opswitch
case ir.OAPPEND:
// x = append(...)
- r := n.Right
- if r.Type.Elem().NotInHeap() {
- base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type.Elem())
+ r := n.Right()
+ if r.Type().Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type().Elem())
}
switch {
case isAppendOfMake(r):
default:
r = walkappend(r, init, n)
}
- n.Right = r
- if r.Op == ir.OAPPEND {
+ n.SetRight(r)
+ if r.Op() == ir.OAPPEND {
// Left in place for back end.
// Do not add a new write barrier.
// Set up address of type for back end.
- r.Left = typename(r.Type.Elem())
+ r.SetLeft(typename(r.Type().Elem()))
break opswitch
}
// Otherwise, lowered for race detector.
// Treat as ordinary assignment.
}
- if n.Left != nil && n.Right != nil {
+ if n.Left() != nil && n.Right() != nil {
n = convas(n, init)
}
case ir.OAS2:
- init.AppendNodes(&n.Ninit)
- walkexprlistsafe(n.List.Slice(), init)
- walkexprlistsafe(n.Rlist.Slice(), init)
- ll := ascompatee(ir.OAS, n.List.Slice(), n.Rlist.Slice(), init)
+ init.AppendNodes(n.PtrInit())
+ walkexprlistsafe(n.List().Slice(), init)
+ walkexprlistsafe(n.Rlist().Slice(), init)
+ ll := ascompatee(ir.OAS, n.List().Slice(), n.Rlist().Slice(), init)
ll = reorder3(ll)
n = liststmt(ll)
// a,b,... = fn()
case ir.OAS2FUNC:
- init.AppendNodes(&n.Ninit)
+ init.AppendNodes(n.PtrInit())
- r := n.Right
- walkexprlistsafe(n.List.Slice(), init)
+ r := n.Right()
+ walkexprlistsafe(n.List().Slice(), init)
r = walkexpr(r, init)
if isIntrinsicCall(r) {
- n.Right = r
+ n.SetRight(r)
break
}
init.Append(r)
- ll := ascompatet(n.List, r.Type)
+ ll := ascompatet(n.List(), r.Type())
n = liststmt(ll)
// x, y = <-c
// order.stmt made sure x is addressable or blank.
case ir.OAS2RECV:
- init.AppendNodes(&n.Ninit)
+ init.AppendNodes(n.PtrInit())
- r := n.Right
- walkexprlistsafe(n.List.Slice(), init)
- r.Left = walkexpr(r.Left, init)
+ r := n.Right()
+ walkexprlistsafe(n.List().Slice(), init)
+ r.SetLeft(walkexpr(r.Left(), init))
var n1 *ir.Node
- if ir.IsBlank(n.List.First()) {
+ if ir.IsBlank(n.List().First()) {
n1 = nodnil()
} else {
- n1 = ir.Nod(ir.OADDR, n.List.First(), nil)
+ n1 = ir.Nod(ir.OADDR, n.List().First(), nil)
}
- fn := chanfn("chanrecv2", 2, r.Left.Type)
- ok := n.List.Second()
- call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left, n1)
+ fn := chanfn("chanrecv2", 2, r.Left().Type())
+ ok := n.List().Second()
+ call := mkcall1(fn, types.Types[types.TBOOL], init, r.Left(), n1)
n = ir.Nod(ir.OAS, ok, call)
n = typecheck(n, ctxStmt)
// a,b = m[i]
case ir.OAS2MAPR:
- init.AppendNodes(&n.Ninit)
+ init.AppendNodes(n.PtrInit())
- r := n.Right
- walkexprlistsafe(n.List.Slice(), init)
- r.Left = walkexpr(r.Left, init)
- r.Right = walkexpr(r.Right, init)
- t := r.Left.Type
+ r := n.Right()
+ walkexprlistsafe(n.List().Slice(), init)
+ r.SetLeft(walkexpr(r.Left(), init))
+ r.SetRight(walkexpr(r.Right(), init))
+ t := r.Left().Type()
fast := mapfast(t)
var key *ir.Node
if fast != mapslow {
// fast versions take key by value
- key = r.Right
+ key = r.Right()
} else {
// standard version takes key by reference
// order.expr made sure key is addressable.
- key = ir.Nod(ir.OADDR, r.Right, nil)
+ key = ir.Nod(ir.OADDR, r.Right(), nil)
}
// from:
//   a,b = m[i]
// to:
// var,b = mapaccess2*(t, m, i)
// a = *var
- a := n.List.First()
+ a := n.List().First()
if w := t.Elem().Width; w <= zeroValSize {
fn := mapfn(mapaccess2[fast], t)
- r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key)
+ r = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key)
} else {
fn := mapfn("mapaccess2_fat", t)
z := zeroaddr(w)
- r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z)
+ r = mkcall1(fn, fn.Type().Results(), init, typename(t), r.Left(), key, z)
}
// mapaccess2* returns a typed bool, but due to spec changes,
// the boolean result of i.(T) is now untyped so we make it the
// same type as the variable on the lhs.
- if ok := n.List.Second(); !ir.IsBlank(ok) && ok.Type.IsBoolean() {
- r.Type.Field(1).Type = ok.Type
+ if ok := n.List().Second(); !ir.IsBlank(ok) && ok.Type().IsBoolean() {
+ r.Type().Field(1).Type = ok.Type()
}
- n.Right = r
- n.Op = ir.OAS2FUNC
+ n.SetRight(r)
+ n.SetOp(ir.OAS2FUNC)
// don't generate a = *var if a is _
if !ir.IsBlank(a) {
var_ := temp(types.NewPtr(t.Elem()))
var_.SetTypecheck(1)
var_.MarkNonNil() // mapaccess always returns a non-nil pointer
- n.List.SetFirst(var_)
+ n.List().SetFirst(var_)
n = walkexpr(n, init)
init.Append(n)
n = ir.Nod(ir.OAS, a, ir.Nod(ir.ODEREF, var_, nil))
n = walkexpr(n, init)
case ir.ODELETE:
- init.AppendNodes(&n.Ninit)
- map_ := n.List.First()
- key := n.List.Second()
+ init.AppendNodes(n.PtrInit())
+ map_ := n.List().First()
+ key := n.List().Second()
map_ = walkexpr(map_, init)
key = walkexpr(key, init)
- t := map_.Type
+ t := map_.Type()
fast := mapfast(t)
if fast == mapslow {
// order.stmt made sure key is addressable.
n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key)
case ir.OAS2DOTTYPE:
- walkexprlistsafe(n.List.Slice(), init)
- n.Right = walkexpr(n.Right, init)
+ walkexprlistsafe(n.List().Slice(), init)
+ n.SetRight(walkexpr(n.Right(), init))
case ir.OCONVIFACE:
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
- fromType := n.Left.Type
- toType := n.Type
+ fromType := n.Left().Type()
+ toType := n.Type()
- if !fromType.IsInterface() && !ir.IsBlank(Curfn.Func.Nname) { // skip unnamed functions (func _())
- markTypeUsedInInterface(fromType, Curfn.Func.LSym)
+ if !fromType.IsInterface() && !ir.IsBlank(Curfn.Func().Nname) { // skip unnamed functions (func _())
+ markTypeUsedInInterface(fromType, Curfn.Func().LSym)
}
// typeword generates the type word of the interface value.
// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
if isdirectiface(fromType) {
- l := ir.Nod(ir.OEFACE, typeword(), n.Left)
- l.Type = toType
+ l := ir.Nod(ir.OEFACE, typeword(), n.Left())
+ l.SetType(toType)
l.SetTypecheck(n.Typecheck())
n = l
break
staticuint64s.SetClass(ir.PEXTERN)
// The actual type is [256]uint64, but we use [256*8]uint8 so we can address
// individual bytes.
- staticuint64s.Type = types.NewArray(types.Types[types.TUINT8], 256*8)
+ staticuint64s.SetType(types.NewArray(types.Types[types.TUINT8], 256*8))
zerobase = NewName(Runtimepkg.Lookup("zerobase"))
zerobase.SetClass(ir.PEXTERN)
- zerobase.Type = types.Types[types.TUINTPTR]
+ zerobase.SetType(types.Types[types.TUINTPTR])
}
// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
// by using an existing addressable value identical to n.Left
// or creating one on the stack.
switch {
case fromType.Size() == 0:
// n.Left is zero-sized. Use zerobase.
- cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246.
+ cheapexpr(n.Left(), init) // Evaluate n.Left for side-effects. See issue 19246.
value = zerobase
case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
// n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
// and staticuint64s[n.Left * 8 + 7] on big-endian.
- n.Left = cheapexpr(n.Left, init)
+ n.SetLeft(cheapexpr(n.Left(), init))
// byteindex widens n.Left so that the multiplication doesn't overflow.
- index := ir.Nod(ir.OLSH, byteindex(n.Left), nodintconst(3))
+ index := ir.Nod(ir.OLSH, byteindex(n.Left()), nodintconst(3))
if thearch.LinkArch.ByteOrder == binary.BigEndian {
index = ir.Nod(ir.OADD, index, nodintconst(7))
}
value = ir.Nod(ir.OINDEX, staticuint64s, index)
value.SetBounded(true)
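Why the +7 above: every staticuint64s cell is a uint64 holding a value that fits in one byte, and the interface data word must point at that byte, which sits at offset 0 of the cell on little-endian targets and offset 7 on big-endian ones. Sketch:

	func box(b byte) interface{} {
		return b // data word: &staticuint64s[int(b)*8] (little-endian)
		//        or &staticuint64s[int(b)*8+7] (big-endian); no allocation
	}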
- case n.Left.Class() == ir.PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly():
+ case n.Left().Class() == ir.PEXTERN && n.Left().Name() != nil && n.Left().Name().Readonly():
// n.Left is a readonly global; use it directly.
- value = n.Left
- case !fromType.IsInterface() && n.Esc == EscNone && fromType.Width <= 1024:
+ value = n.Left()
+ case !fromType.IsInterface() && n.Esc() == EscNone && fromType.Width <= 1024:
// n.Left does not escape. Use a stack temporary initialized to n.Left.
value = temp(fromType)
- init.Append(typecheck(ir.Nod(ir.OAS, value, n.Left), ctxStmt))
+ init.Append(typecheck(ir.Nod(ir.OAS, value, n.Left()), ctxStmt))
}
if value != nil {
// Value is identical to n.Left.
// Construct the interface directly: {type/itab, &value}.
l := ir.Nod(ir.OEFACE, typeword(), typecheck(ir.Nod(ir.OADDR, value, nil), ctxExpr))
- l.Type = toType
+ l.SetType(toType)
l.SetTypecheck(n.Typecheck())
n = l
break
if toType.IsEmptyInterface() && fromType.IsInterface() && !fromType.IsEmptyInterface() {
// Evaluate the input interface.
c := temp(fromType)
- init.Append(ir.Nod(ir.OAS, c, n.Left))
+ init.Append(ir.Nod(ir.OAS, c, n.Left()))
// Get the itab out of the interface.
tmp := temp(types.NewPtr(types.Types[types.TUINT8]))
// Get the type out of the itab.
nif := ir.Nod(ir.OIF, typecheck(ir.Nod(ir.ONE, tmp, nodnil()), ctxExpr), nil)
- nif.Nbody.Set1(ir.Nod(ir.OAS, tmp, itabType(tmp)))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, tmp, itabType(tmp)))
init.Append(nif)
// Build the result.
- e := ir.Nod(ir.OEFACE, tmp, ifaceData(n.Pos, c, types.NewPtr(types.Types[types.TUINT8])))
- e.Type = toType // assign type manually, typecheck doesn't understand OEFACE.
+ e := ir.Nod(ir.OEFACE, tmp, ifaceData(n.Pos(), c, types.NewPtr(types.Types[types.TUINT8])))
+ e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
e.SetTypecheck(1)
n = e
break
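Shape of the conversion just built, as a sketch (assuming the io import): from a nonempty interface to the empty interface only the type word changes, and it is read out of the itab unless the source is nil:

	func widen(r io.Reader) interface{} {
		return r // tmp = itab word of r; if tmp != nil { tmp = the itab's type }
		//          result is the eface {tmp, data word of r}
	}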
fn := syslook(fnname)
dowidth(fromType)
fn = substArgTypes(fn, fromType)
- dowidth(fn.Type)
+ dowidth(fn.Type())
call := ir.Nod(ir.OCALL, fn, nil)
- call.List.Set1(n.Left)
+ call.PtrList().Set1(n.Left())
call = typecheck(call, ctxExpr)
call = walkexpr(call, init)
call = safeexpr(call, init)
e := ir.Nod(ir.OEFACE, typeword(), call)
- e.Type = toType
+ e.SetType(toType)
e.SetTypecheck(1)
n = e
break
tab = typeword()
}
- v := n.Left
+ v := n.Left()
if needsaddr {
// Types of large or unknown size are passed by reference.
// Orderexpr arranged for n.Left to be a temporary for all
// the conversions it could see. Comparison of an interface
// with a non-interface, however, is not visible to order.stmt,
// so we have to fall back on allocating a temp here.
if !islvalue(v) {
- v = copyexpr(v, v.Type, init)
+ v = copyexpr(v, v.Type(), init)
}
v = ir.Nod(ir.OADDR, v, nil)
}
dowidth(fromType)
fn := syslook(fnname)
fn = substArgTypes(fn, fromType, toType)
- dowidth(fn.Type)
+ dowidth(fn.Type())
n = ir.Nod(ir.OCALL, fn, nil)
- n.List.Set2(tab, v)
+ n.PtrList().Set2(tab, v)
n = typecheck(n, ctxExpr)
n = walkexpr(n, init)
case ir.OCONV, ir.OCONVNOP:
- n.Left = walkexpr(n.Left, init)
- if n.Op == ir.OCONVNOP && checkPtr(Curfn, 1) {
- if n.Type.IsPtr() && n.Left.Type.IsUnsafePtr() { // unsafe.Pointer to *T
+ n.SetLeft(walkexpr(n.Left(), init))
+ if n.Op() == ir.OCONVNOP && checkPtr(Curfn, 1) {
+ if n.Type().IsPtr() && n.Left().Type().IsUnsafePtr() { // unsafe.Pointer to *T
n = walkCheckPtrAlignment(n, init, nil)
break
}
- if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() { // uintptr to unsafe.Pointer
+ if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() { // uintptr to unsafe.Pointer
n = walkCheckPtrArithmetic(n, init)
break
}
}
- param, result := rtconvfn(n.Left.Type, n.Type)
+ param, result := rtconvfn(n.Left().Type(), n.Type())
if param == types.Txxx {
break
}
fn := ir.BasicTypeNames[param] + "to" + ir.BasicTypeNames[result]
- n = conv(mkcall(fn, types.Types[result], init, conv(n.Left, types.Types[param])), n.Type)
+ n = conv(mkcall(fn, types.Types[result], init, conv(n.Left(), types.Types[param])), n.Type())
case ir.ODIV, ir.OMOD:
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
// rewrite complex div into function call.
- et := n.Left.Type.Etype
+ et := n.Left().Type().Etype
- if isComplex[et] && n.Op == ir.ODIV {
- t := n.Type
- n = mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left, types.Types[types.TCOMPLEX128]), conv(n.Right, types.Types[types.TCOMPLEX128]))
+ if isComplex[et] && n.Op() == ir.ODIV {
+ t := n.Type()
+ n = mkcall("complex128div", types.Types[types.TCOMPLEX128], init, conv(n.Left(), types.Types[types.TCOMPLEX128]), conv(n.Right(), types.Types[types.TCOMPLEX128]))
n = conv(n, t)
break
}
// TODO: Remove this code once we can introduce
// runtime calls late in SSA processing.
if Widthreg < 8 && (et == types.TINT64 || et == types.TUINT64) {
- if n.Right.Op == ir.OLITERAL {
+ if n.Right().Op() == ir.OLITERAL {
// Leave div/mod by constant powers of 2 or small 16-bit constants.
// The SSA backend will handle those.
switch et {
case types.TINT64:
- c := n.Right.Int64Val()
+ c := n.Right().Int64Val()
if c < 0 {
c = -c
}
break opswitch
}
case types.TUINT64:
- c := n.Right.Uint64Val()
+ c := n.Right().Uint64Val()
if c < 1<<16 {
break opswitch
}
} else {
fn = "uint64"
}
- if n.Op == ir.ODIV {
+ if n.Op() == ir.ODIV {
fn += "div"
} else {
fn += "mod"
}
- n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et]))
+ n = mkcall(fn, n.Type(), init, conv(n.Left(), types.Types[et]), conv(n.Right(), types.Types[et]))
}
case ir.OINDEX:
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
// save the original node for bounds checking elision.
// If it was an ODIV/OMOD, walk might rewrite it.
- r := n.Right
+ r := n.Right()
- n.Right = walkexpr(n.Right, init)
+ n.SetRight(walkexpr(n.Right(), init))
// if range of type cannot exceed static array bound,
// disable bounds check.
if n.Bounded() {
break
}
- t := n.Left.Type
+ t := n.Left().Type()
if t != nil && t.IsPtr() {
t = t.Elem()
}
if t.IsArray() {
n.SetBounded(bounded(r, t.NumElem()))
- if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right, constant.Int) {
+ if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) {
base.Warn("index bounds check elided")
}
- if smallintconst(n.Right) && !n.Bounded() {
+ if smallintconst(n.Right()) && !n.Bounded() {
base.Errorf("index out of bounds")
}
- } else if ir.IsConst(n.Left, constant.String) {
- n.SetBounded(bounded(r, int64(len(n.Left.StringVal()))))
- if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right, constant.Int) {
+ } else if ir.IsConst(n.Left(), constant.String) {
+ n.SetBounded(bounded(r, int64(len(n.Left().StringVal()))))
+ if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Right(), constant.Int) {
base.Warn("index bounds check elided")
}
- if smallintconst(n.Right) && !n.Bounded() {
+ if smallintconst(n.Right()) && !n.Bounded() {
base.Errorf("index out of bounds")
}
}
- if ir.IsConst(n.Right, constant.Int) {
- if v := n.Right.Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[types.TINT]) {
+ if ir.IsConst(n.Right(), constant.Int) {
+ if v := n.Right().Val(); constant.Sign(v) < 0 || doesoverflow(v, types.Types[types.TINT]) {
base.Errorf("index out of bounds")
}
}
case ir.OINDEXMAP:
// Replace m[k] with *map{access1,assign}(maptype, m, &k)
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
- map_ := n.Left
- key := n.Right
- t := map_.Type
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
+ map_ := n.Left()
+ key := n.Right()
+ t := map_.Type()
if n.IndexMapLValue() {
// This m[k] expression is on the left-hand side of an assignment.
fast := mapfast(t)
n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z)
}
}
- n.Type = types.NewPtr(t.Elem())
+ n.SetType(types.NewPtr(t.Elem()))
n.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
n = ir.Nod(ir.ODEREF, n, nil)
- n.Type = t.Elem()
+ n.SetType(t.Elem())
n.SetTypecheck(1)
case ir.ORECV:
base.Fatalf("walkexpr ORECV") // should see inside OAS only
case ir.OSLICEHEADER:
- n.Left = walkexpr(n.Left, init)
- n.List.SetFirst(walkexpr(n.List.First(), init))
- n.List.SetSecond(walkexpr(n.List.Second(), init))
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.List().SetFirst(walkexpr(n.List().First(), init))
+ n.List().SetSecond(walkexpr(n.List().Second(), init))
case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
- checkSlice := checkPtr(Curfn, 1) && n.Op == ir.OSLICE3ARR && n.Left.Op == ir.OCONVNOP && n.Left.Left.Type.IsUnsafePtr()
+ checkSlice := checkPtr(Curfn, 1) && n.Op() == ir.OSLICE3ARR && n.Left().Op() == ir.OCONVNOP && n.Left().Left().Type().IsUnsafePtr()
if checkSlice {
- n.Left.Left = walkexpr(n.Left.Left, init)
+ n.Left().SetLeft(walkexpr(n.Left().Left(), init))
} else {
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
}
low, high, max := n.SliceBounds()
low = walkexpr(low, init)
max = walkexpr(max, init)
n.SetSliceBounds(low, high, max)
if checkSlice {
- n.Left = walkCheckPtrAlignment(n.Left, init, max)
+ n.SetLeft(walkCheckPtrAlignment(n.Left(), init, max))
}
- if n.Op.IsSlice3() {
- if max != nil && max.Op == ir.OCAP && samesafeexpr(n.Left, max.Left) {
+ if n.Op().IsSlice3() {
+ if max != nil && max.Op() == ir.OCAP && samesafeexpr(n.Left(), max.Left()) {
// Reduce x[i:j:cap(x)] to x[i:j].
- if n.Op == ir.OSLICE3 {
- n.Op = ir.OSLICE
+ if n.Op() == ir.OSLICE3 {
+ n.SetOp(ir.OSLICE)
} else {
- n.Op = ir.OSLICEARR
+ n.SetOp(ir.OSLICEARR)
}
n = reduceSlice(n)
}
}
case ir.ONEW:
- if n.Type.Elem().NotInHeap() {
- base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem())
+ if n.Type().Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
}
- if n.Esc == EscNone {
- if n.Type.Elem().Width >= maxImplicitStackVarSize {
+ if n.Esc() == EscNone {
+ if n.Type().Elem().Width >= maxImplicitStackVarSize {
base.Fatalf("large ONEW with EscNone: %v", n)
}
- r := temp(n.Type.Elem())
+ r := temp(n.Type().Elem())
r = ir.Nod(ir.OAS, r, nil) // zero temp
r = typecheck(r, ctxStmt)
init.Append(r)
- r = ir.Nod(ir.OADDR, r.Left, nil)
+ r = ir.Nod(ir.OADDR, r.Left(), nil)
r = typecheck(r, ctxExpr)
n = r
} else {
- n = callnew(n.Type.Elem())
+ n = callnew(n.Type().Elem())
}
case ir.OADDSTR:
case ir.OCLOSE:
fn := syslook("closechan")
- fn = substArgTypes(fn, n.Left.Type)
- n = mkcall1(fn, nil, init, n.Left)
+ fn = substArgTypes(fn, n.Left().Type())
+ n = mkcall1(fn, nil, init, n.Left())
case ir.OMAKECHAN:
// When size fits into int, use makechan instead of
// makechan64, which is faster and shorter on 32 bit platforms.
- size := n.Left
+ size := n.Left()
fnname := "makechan64"
argtype := types.Types[types.TINT64]
// Type checking guarantees that TIDEAL size is positive and fits in an int.
// The case of size overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makechan during runtime.
- if size.Type.IsKind(types.TIDEAL) || size.Type.Size() <= types.Types[types.TUINT].Size() {
+ if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
fnname = "makechan"
argtype = types.Types[types.TINT]
}
- n = mkcall1(chanfn(fnname, 1, n.Type), n.Type, init, typename(n.Type), conv(size, argtype))
+ n = mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), conv(size, argtype))
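Concretely (sketch): the 64-bit entry point is only needed when the size expression's type might not fit in int on a 32-bit target:

	func chans(n int64) (chan int, chan int) {
		c1 := make(chan int, 10) // int-sized size: runtime.makechan
		c2 := make(chan int, n)  // int64 size on a 32-bit target: runtime.makechan64
		return c1, c2
	}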
case ir.OMAKEMAP:
- t := n.Type
+ t := n.Type()
hmapType := hmap(t)
- hint := n.Left
+ hint := n.Left()
// var h *hmap
var h *ir.Node
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Allocate hmap on stack.
// var hv hmap
// var bv bmap
bv := temp(bmap(t))
zero = ir.Nod(ir.OAS, bv, nil)
- nif.Nbody.Append(zero)
+ nif.PtrBody().Append(zero)
// b = &bv
b := ir.Nod(ir.OADDR, bv, nil)
// h.buckets = b
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
na := ir.Nod(ir.OAS, nodSym(ir.ODOT, h, bsym), b)
- nif.Nbody.Append(na)
+ nif.PtrBody().Append(na)
nif = typecheck(nif, ctxStmt)
nif = walkstmt(nif)
// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
// and no buckets will be allocated by makemap. Therefore,
// no buckets need to be allocated in this code path.
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Only need to initialize h.hash0 since
// hmap h has been allocated on the stack already.
// h.hash0 = fastrand()
// Call runtime.makemap_small to allocate an
// hmap on the heap and initialize hmap's hash0 field.
fn := syslook("makemap_small")
fn = substArgTypes(fn, t.Key(), t.Elem())
- n = mkcall1(fn, n.Type, init)
+ n = mkcall1(fn, n.Type(), init)
}
} else {
- if n.Esc != EscNone {
+ if n.Esc() != EscNone {
h = nodnil()
}
// Map initialization with a variable or large hint is
// more complicated. We therefore generate a call to
// runtime.makemap to initialize hmap and allocate the
// map buckets.
// Type checking guarantees that TIDEAL hint is positive and fits in an int.
// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
// The case of hint overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makemap during runtime.
- if hint.Type.IsKind(types.TIDEAL) || hint.Type.Size() <= types.Types[types.TUINT].Size() {
+ if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
fnname = "makemap"
argtype = types.Types[types.TINT]
}
fn := syslook(fnname)
fn = substArgTypes(fn, hmapType, t.Key(), t.Elem())
- n = mkcall1(fn, n.Type, init, typename(n.Type), conv(hint, argtype), h)
+ n = mkcall1(fn, n.Type(), init, typename(n.Type()), conv(hint, argtype), h)
}
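Putting the three OMAKEMAP paths together (sketch): a non-escaping map with a small hint gets a stack hmap and only an h.hash0 = fastrand() initialization; an escaping map with a small hint calls makemap_small; a variable or large hint calls makemap (or makemap64 on 32-bit):

	func hasDup(keys []string) bool {
		seen := make(map[string]bool) // non-escaping, no hint: stack hmap
		for _, k := range keys {
			if seen[k] {
				return true
			}
			seen[k] = true
		}
		return false
	}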
case ir.OMAKESLICE:
- l := n.Left
- r := n.Right
+ l := n.Left()
+ r := n.Right()
if r == nil {
r = safeexpr(l, init)
l = r
}
- t := n.Type
+ t := n.Type()
if t.Elem().NotInHeap() {
base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
if why := heapAllocReason(n); why != "" {
base.Fatalf("%v has EscNone, but %v", n, why)
}
// }
nif := ir.Nod(ir.OIF, ir.Nod(ir.OGT, conv(l, types.Types[types.TUINT64]), nodintconst(i)), nil)
niflen := ir.Nod(ir.OIF, ir.Nod(ir.OLT, l, nodintconst(0)), nil)
- niflen.Nbody.Set1(mkcall("panicmakeslicelen", nil, init))
- nif.Nbody.Append(niflen, mkcall("panicmakeslicecap", nil, init))
+ niflen.PtrBody().Set1(mkcall("panicmakeslicelen", nil, init))
+ nif.PtrBody().Append(niflen, mkcall("panicmakeslicecap", nil, init))
nif = typecheck(nif, ctxStmt)
init.Append(nif)
init.Append(a)
r := ir.Nod(ir.OSLICE, var_, nil) // arr[:l]
r.SetSliceBounds(nil, l, nil)
- r = conv(r, n.Type) // in case n.Type is named.
+ r = conv(r, n.Type()) // in case n.Type is named.
r = typecheck(r, ctxExpr)
r = walkexpr(r, init)
n = r
// Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makeslice during runtime.
- if (len.Type.IsKind(types.TIDEAL) || len.Type.Size() <= types.Types[types.TUINT].Size()) &&
- (cap.Type.IsKind(types.TIDEAL) || cap.Type.Size() <= types.Types[types.TUINT].Size()) {
+ if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
+ (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
fnname = "makeslice"
argtype = types.Types[types.TINT]
}
m := ir.Nod(ir.OSLICEHEADER, nil, nil)
- m.Type = t
+ m.SetType(t)
fn := syslook(fnname)
- m.Left = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))
- m.Left.MarkNonNil()
- m.List.Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT]))
+ m.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)))
+ m.Left().MarkNonNil()
+ m.PtrList().Set2(conv(len, types.Types[types.TINT]), conv(cap, types.Types[types.TINT]))
m = typecheck(m, ctxExpr)
m = walkexpr(m, init)
}
case ir.OMAKESLICECOPY:
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
}
- t := n.Type
+ t := n.Type()
if t.Elem().NotInHeap() {
base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
- length := conv(n.Left, types.Types[types.TINT])
- copylen := ir.Nod(ir.OLEN, n.Right, nil)
- copyptr := ir.Nod(ir.OSPTR, n.Right, nil)
+ length := conv(n.Left(), types.Types[types.TINT])
+ copylen := ir.Nod(ir.OLEN, n.Right(), nil)
+ copyptr := ir.Nod(ir.OSPTR, n.Right(), nil)
if !t.Elem().HasPointers() && n.Bounded() {
// When len(to)==len(from) and elements have no pointers:
// instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
fn := syslook("mallocgc")
sh := ir.Nod(ir.OSLICEHEADER, nil, nil)
- sh.Left = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false))
- sh.Left.MarkNonNil()
- sh.List.Set2(length, length)
- sh.Type = t
+ sh.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, nodnil(), nodbool(false)))
+ sh.Left().MarkNonNil()
+ sh.PtrList().Set2(length, length)
+ sh.SetType(t)
s := temp(t)
r := typecheck(ir.Nod(ir.OAS, s, sh), ctxStmt)
// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
fn := syslook("makeslicecopy")
s := ir.Nod(ir.OSLICEHEADER, nil, nil)
- s.Left = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR]))
- s.Left.MarkNonNil()
- s.List.Set2(length, length)
- s.Type = t
+ s.SetLeft(mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, conv(copyptr, types.Types[types.TUNSAFEPTR])))
+ s.Left().MarkNonNil()
+ s.PtrList().Set2(length, length)
+ s.SetType(t)
n = typecheck(s, ctxExpr)
n = walkexpr(n, init)
}
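OMAKESLICECOPY is the fused form of a make-then-copy pair recognized earlier in the front end; the two lowerings above correspond to (sketch):

	func clone(a []byte) []byte {
		b := make([]byte, len(a)) // fused with the copy below into OMAKESLICECOPY:
		copy(b, a)                // no pointers + equal lengths: mallocgc without
		return b                  // zeroing, plus memmove; else one makeslicecopy call
	}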
case ir.ORUNESTR:
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
t := types.NewArray(types.Types[types.TUINT8], 4)
a = ir.Nod(ir.OADDR, temp(t), nil)
}
// intstring(*[4]byte, rune)
- n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[types.TINT64]))
+ n = mkcall("intstring", n.Type(), init, a, conv(n.Left(), types.Types[types.TINT64]))
case ir.OBYTES2STR, ir.ORUNES2STR:
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Create temporary buffer for string on stack.
t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
a = ir.Nod(ir.OADDR, temp(t), nil)
}
- if n.Op == ir.ORUNES2STR {
+ if n.Op() == ir.ORUNES2STR {
// slicerunetostring(*[32]byte, []rune) string
- n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
+ n = mkcall("slicerunetostring", n.Type(), init, a, n.Left())
} else {
// slicebytetostring(*[32]byte, ptr *byte, n int) string
- n.Left = cheapexpr(n.Left, init)
- ptr, len := backingArrayPtrLen(n.Left)
- n = mkcall("slicebytetostring", n.Type, init, a, ptr, len)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptr, len := backingArrayPtrLen(n.Left())
+ n = mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
}
case ir.OBYTES2STRTMP:
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
if !instrumenting {
// Let the backend handle OBYTES2STRTMP directly
// to avoid a function call to slicebytetostringtmp.
break
}
// slicebytetostringtmp(ptr *byte, n int) string
- n.Left = cheapexpr(n.Left, init)
- ptr, len := backingArrayPtrLen(n.Left)
- n = mkcall("slicebytetostringtmp", n.Type, init, ptr, len)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptr, len := backingArrayPtrLen(n.Left())
+ n = mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
case ir.OSTR2BYTES:
- s := n.Left
+ s := n.Left()
if ir.IsConst(s, constant.String) {
sc := s.StringVal()
// Allocate a [n]byte of the right size.
t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
var a *ir.Node
- if n.Esc == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
+ if n.Esc() == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
a = ir.Nod(ir.OADDR, temp(t), nil)
} else {
a = callnew(t)
}
// Slice the [n]byte to a []byte.
- n.Op = ir.OSLICEARR
- n.Left = p
+ n.SetOp(ir.OSLICEARR)
+ n.SetLeft(p)
n = walkexpr(n, init)
break
}
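The constant-string fast path, illustrated (sketch): the conversion becomes a fixed-size array temporary, filled by copying the static string data, then sliced via OSLICEARR:

	func sum() int {
		p := []byte("hello") // *[5]byte temp (stack if small and non-escaping,
		t := 0               // callnew otherwise), filled from static data,
		for _, c := range p { // then sliced: p = tmp[:]
			t += int(c)
		}
		return t
	}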
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Create temporary buffer for slice on stack.
t := types.NewArray(types.Types[types.TUINT8], tmpstringbufsize)
a = ir.Nod(ir.OADDR, temp(t), nil)
}
// stringtoslicebyte(*[32]byte, string) []byte
- n = mkcall("stringtoslicebyte", n.Type, init, a, conv(s, types.Types[types.TSTRING]))
+ n = mkcall("stringtoslicebyte", n.Type(), init, a, conv(s, types.Types[types.TSTRING]))
case ir.OSTR2BYTESTMP:
// []byte(string) conversion that creates a slice
// referencing the actual string bytes; used only by internal
// compiler optimizations that know the slice won't be mutated.
// The only such case today is:
// for i, c := range []byte(string)
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
case ir.OSTR2RUNES:
a := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
// Create temporary buffer for slice on stack.
t := types.NewArray(types.Types[types.TINT32], tmpstringbufsize)
a = ir.Nod(ir.OADDR, temp(t), nil)
}
// stringtoslicerune(*[32]rune, string) []rune
- n = mkcall("stringtoslicerune", n.Type, init, a, conv(n.Left, types.Types[types.TSTRING]))
+ n = mkcall("stringtoslicerune", n.Type(), init, a, conv(n.Left(), types.Types[types.TSTRING]))
case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT:
- if isStaticCompositeLiteral(n) && !canSSAType(n.Type) {
+ if isStaticCompositeLiteral(n) && !canSSAType(n.Type()) {
// n can be directly represented in the read-only data section.
// Make direct reference to the static data. See issue 12841.
- vstat := readonlystaticname(n.Type)
+ vstat := readonlystaticname(n.Type())
fixedlit(inInitFunction, initKindStatic, n, vstat, init)
n = vstat
n = typecheck(n, ctxExpr)
break
}
- var_ := temp(n.Type)
+ var_ := temp(n.Type())
anylit(n, var_, init)
n = var_
case ir.OSEND:
- n1 := n.Right
- n1 = assignconv(n1, n.Left.Type.Elem(), "chan send")
+ n1 := n.Right()
+ n1 = assignconv(n1, n.Left().Type().Elem(), "chan send")
n1 = walkexpr(n1, init)
n1 = ir.Nod(ir.OADDR, n1, nil)
- n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1)
+ n = mkcall1(chanfn("chansend1", 2, n.Left().Type()), nil, init, n.Left(), n1)
case ir.OCLOSURE:
n = walkclosure(n, init)
// Expressions that are constant at run time but not
// considered const by the language spec are not turned into
// constants until walk. For example, if n is y%1 == 0, the
// walk of y%1 may have replaced it by 0.
// Check whether n with its updated args is itself now a constant.
- t := n.Type
+ t := n.Type()
n = evalConst(n)
- if n.Type != t {
- base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type)
+ if n.Type() != t {
+ base.Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type())
}
- if n.Op == ir.OLITERAL {
+ if n.Op() == ir.OLITERAL {
n = typecheck(n, ctxExpr)
// Emit string symbol now to avoid emitting
// any concurrently during the backend.
if v := n.Val(); v.Kind() == constant.String {
- _ = stringsym(n.Pos, constant.StringVal(v))
+ _ = stringsym(n.Pos(), constant.StringVal(v))
}
}
// markUsedIfaceMethod marks that an interface method is used in the current
// function. n is an OCALLINTER node.
func markUsedIfaceMethod(n *ir.Node) {
- ityp := n.Left.Left.Type
+ ityp := n.Left().Left().Type()
tsym := typenamesym(ityp).Linksym()
- r := obj.Addrel(Curfn.Func.LSym)
+ r := obj.Addrel(Curfn.Func().LSym)
r.Sym = tsym
// n.Left().Offset() is the method index * Widthptr (the offset of the code
// pointer in itab).
- midx := n.Left.Xoffset / int64(Widthptr)
+ midx := n.Left().Offset() / int64(Widthptr)
r.Add = ifaceMethodOffset(ityp, midx)
r.Type = objabi.R_USEIFACEMETHOD
}
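// Illustrative sketch (not itself compiler code): the mechanical pattern
// applied throughout this diff replaces direct field access on ir.Node with
// getter/setter methods, so the node representation can evolve without
// touching every client. A minimal analogue, with hypothetical names:
//
//	type node struct {
//		left *node // unexported; clients go through the accessors
//	}
//
//	func (n *node) Left() *node     { return n.left }
//	func (n *node) SetLeft(l *node) { n.left = l }
//
// Reads of n.Left become n.Left(), writes become n.SetLeft(x), and list
// fields gain a read accessor (List) plus a mutating one (PtrList).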
// TODO(josharian): combine this with its caller and simplify
func reduceSlice(n *ir.Node) *ir.Node {
low, high, max := n.SliceBounds()
- if high != nil && high.Op == ir.OLEN && samesafeexpr(n.Left, high.Left) {
+ if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.Left(), high.Left()) {
// Reduce x[i:len(x)] to x[i:].
high = nil
}
n.SetSliceBounds(low, high, max)
- if (n.Op == ir.OSLICE || n.Op == ir.OSLICESTR) && low == nil && high == nil {
+ if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && low == nil && high == nil {
// Reduce x[:] to x.
if base.Debug.Slice > 0 {
base.Warn("slice: omit slice operation")
}
- return n.Left
+ return n.Left()
}
return n
}
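// Illustration of the two rewrites reduceSlice performs (a sketch of the
// source-level effect, as described by the comments above):
//
//	x[i:len(x)]  =>  x[i:]  // high bound is len of the same expression
//	x[:]         =>  x      // no-op slice, OSLICE/OSLICESTR only
//
// The second rewrite is restricted to slices and strings: slicing an array
// (OSLICEARR) yields a value of a different type, so it cannot be dropped.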
// making it impossible for reorder3 to work.
n := ir.Nod(ir.OAS, l, r)
- if l.Op == ir.OINDEXMAP {
+ if l.Op() == ir.OINDEXMAP {
return n
}
// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
func fncall(l *ir.Node, rt *types.Type) bool {
- if l.HasCall() || l.Op == ir.OINDEXMAP {
+ if l.HasCall() || l.Op() == ir.OINDEXMAP {
return true
}
- if types.Identical(l.Type, rt) {
+ if types.Identical(l.Type(), rt) {
return false
}
// There might be a conversion required, which might involve a runtime call.
}
res := ir.Nod(ir.ORESULT, nil, nil)
- res.Xoffset = base.Ctxt.FixedFrameSize() + r.Offset
- res.Type = r.Type
+ res.SetOffset(base.Ctxt.FixedFrameSize() + r.Offset)
+ res.SetType(r.Type)
res.SetTypecheck(1)
a := ir.Nod(ir.OAS, l, res)
var n *ir.Node
if len(args) == 0 {
n = nodnil()
- n.Type = typ
+ n.SetType(typ)
} else {
n = ir.Nod(ir.OCOMPLIT, nil, typenod(typ))
- n.List.Append(args...)
+ n.PtrList().Append(args...)
n.SetImplicit(true)
}
n = typecheck(n, ctxExpr)
- if n.Type == nil {
+ if n.Type() == nil {
base.Fatalf("mkdotargslice: typecheck failed")
}
return n
// fixVariadicCall rewrites calls to variadic functions to use an
// explicit ... argument if one is not already present.
func fixVariadicCall(call *ir.Node) {
- fntype := call.Left.Type
+ fntype := call.Left().Type()
if !fntype.IsVariadic() || call.IsDDD() {
return
}
vi := fntype.NumParams() - 1
vt := fntype.Params().Field(vi).Type
- args := call.List.Slice()
+ args := call.List().Slice()
extra := args[vi:]
slice := mkdotargslice(vt, extra)
for i := range extra {
extra[i] = nil // allow GC
}
- call.List.Set(append(args[:vi], slice))
+ call.PtrList().Set(append(args[:vi], slice))
call.SetIsDDD(true)
}
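// Sketch of the normalization fixVariadicCall performs, assuming a function
// func f(a int, xs ...int):
//
//	f(a, b, c)   =>  f(a, []int{b, c}...)  // trailing args packed by mkdotargslice
//	f(a, xs...)  =>  unchanged             // call.IsDDD() is already true
//
// After this pass, every call to a variadic function carries exactly one
// slice argument in the variadic position.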
func walkCall(n *ir.Node, init *ir.Nodes) {
- if n.Rlist.Len() != 0 {
+ if n.Rlist().Len() != 0 {
return // already walked
}
- params := n.Left.Type.Params()
- args := n.List.Slice()
+ params := n.Left().Type().Params()
+ args := n.List().Slice()
- n.Left = walkexpr(n.Left, init)
+ n.SetLeft(walkexpr(n.Left(), init))
walkexprlist(args, init)
// If this is a method call, add the receiver at the beginning of the args.
- if n.Op == ir.OCALLMETH {
+ if n.Op() == ir.OCALLMETH {
withRecv := make([]*ir.Node, len(args)+1)
- withRecv[0] = n.Left.Left
- n.Left.Left = nil
+ withRecv[0] = n.Left().Left()
+ n.Left().SetLeft(nil)
copy(withRecv[1:], args)
args = withRecv
}
updateHasCall(arg)
// Determine param type.
var t *types.Type
- if n.Op == ir.OCALLMETH {
+ if n.Op() == ir.OCALLMETH {
if i == 0 {
- t = n.Left.Type.Recv().Type
+ t = n.Left().Type().Recv().Type
} else {
t = params.Field(i - 1).Type
}
}
}
- n.List.Set(tempAssigns)
- n.Rlist.Set(args)
+ n.PtrList().Set(tempAssigns)
+ n.PtrRlist().Set(args)
}
// generate code for print
func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node {
// Hoist all the argument evaluation up before the lock.
- walkexprlistcheap(nn.List.Slice(), init)
+ walkexprlistcheap(nn.List().Slice(), init)
// For println, add " " between elements and "\n" at the end.
- if nn.Op == ir.OPRINTN {
- s := nn.List.Slice()
+ if nn.Op() == ir.OPRINTN {
+ s := nn.List().Slice()
t := make([]*ir.Node, 0, len(s)*2)
for i, n := range s {
if i != 0 {
t = append(t, nodstr(" "))
}
t = append(t, n)
}
t = append(t, nodstr("\n"))
- nn.List.Set(t)
+ nn.PtrList().Set(t)
}
// Collapse runs of constant strings.
- s := nn.List.Slice()
+ s := nn.List().Slice()
t := make([]*ir.Node, 0, len(s))
for i := 0; i < len(s); {
var strs []string
i++
}
}
- nn.List.Set(t)
+ nn.PtrList().Set(t)
calls := []*ir.Node{mkcall("printlock", nil, init)}
- for i, n := range nn.List.Slice() {
- if n.Op == ir.OLITERAL {
- if n.Type == types.UntypedRune {
+ for i, n := range nn.List().Slice() {
+ if n.Op() == ir.OLITERAL {
+ if n.Type() == types.UntypedRune {
n = defaultlit(n, types.Runetype)
}
}
}
- if n.Op != ir.OLITERAL && n.Type != nil && n.Type.Etype == types.TIDEAL {
+ if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Etype == types.TIDEAL {
n = defaultlit(n, types.Types[types.TINT64])
}
n = defaultlit(n, nil)
- nn.List.SetIndex(i, n)
- if n.Type == nil || n.Type.Etype == types.TFORW {
+ nn.List().SetIndex(i, n)
+ if n.Type() == nil || n.Type().Etype == types.TFORW {
continue
}
var on *ir.Node
- switch n.Type.Etype {
+ switch n.Type().Etype {
case types.TINTER:
- if n.Type.IsEmptyInterface() {
+ if n.Type().IsEmptyInterface() {
on = syslook("printeface")
} else {
on = syslook("printiface")
}
- on = substArgTypes(on, n.Type) // any-1
+ on = substArgTypes(on, n.Type()) // any-1
case types.TPTR:
- if n.Type.Elem().NotInHeap() {
+ if n.Type().Elem().NotInHeap() {
on = syslook("printuintptr")
n = ir.Nod(ir.OCONV, n, nil)
- n.Type = types.Types[types.TUNSAFEPTR]
+ n.SetType(types.Types[types.TUNSAFEPTR])
n = ir.Nod(ir.OCONV, n, nil)
- n.Type = types.Types[types.TUINTPTR]
+ n.SetType(types.Types[types.TUINTPTR])
break
}
fallthrough
case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
on = syslook("printpointer")
- on = substArgTypes(on, n.Type) // any-1
+ on = substArgTypes(on, n.Type()) // any-1
case types.TSLICE:
on = syslook("printslice")
- on = substArgTypes(on, n.Type) // any-1
+ on = substArgTypes(on, n.Type()) // any-1
case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
- if isRuntimePkg(n.Type.Sym.Pkg) && n.Type.Sym.Name == "hex" {
+ if isRuntimePkg(n.Type().Sym.Pkg) && n.Type().Sym.Name == "hex" {
on = syslook("printhex")
} else {
on = syslook("printuint")
on = syslook("printstring")
}
default:
- badtype(ir.OPRINT, n.Type, nil)
+ badtype(ir.OPRINT, n.Type(), nil)
continue
}
r := ir.Nod(ir.OCALL, on, nil)
- if params := on.Type.Params().FieldSlice(); len(params) > 0 {
+ if params := on.Type().Params().FieldSlice(); len(params) > 0 {
t := params[0].Type
- if !types.Identical(t, n.Type) {
+ if !types.Identical(t, n.Type()) {
n = ir.Nod(ir.OCONV, n, nil)
- n.Type = t
+ n.SetType(t)
}
- r.List.Append(n)
+ r.PtrList().Append(n)
}
calls = append(calls, r)
}
r := ir.Nod(ir.OEMPTY, nil, nil)
r = typecheck(r, ctxStmt)
r = walkexpr(r, init)
- r.Ninit.Set(calls)
+ r.PtrInit().Set(calls)
return r
}
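// Sketch of the lowering walkprint produces (runtime helpers as selected in
// the switch above; argument conversions elided):
//
//	println("x =", x)   // x an int64
//
// becomes, conceptually,
//
//	printlock()
//	printstring("x = ")  // "x =" and the inserted " " collapse into one constant
//	printint(x)
//	printstring("\n")
//	printunlock()        // paired unlock appended after the argument loop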
func callnew(t *types.Type) *ir.Node {
dowidth(t)
n := ir.Nod(ir.ONEWOBJ, typename(t), nil)
- n.Type = types.NewPtr(t)
+ n.SetType(types.NewPtr(t))
n.SetTypecheck(1)
n.MarkNonNil()
return n
// isReflectHeaderDataField reports whether l is an expression p.Data
// where p has type reflect.SliceHeader or reflect.StringHeader.
func isReflectHeaderDataField(l *ir.Node) bool {
- if l.Type != types.Types[types.TUINTPTR] {
+ if l.Type() != types.Types[types.TUINTPTR] {
return false
}
var tsym *types.Sym
- switch l.Op {
+ switch l.Op() {
case ir.ODOT:
- tsym = l.Left.Type.Sym
+ tsym = l.Left().Type().Sym
case ir.ODOTPTR:
- tsym = l.Left.Type.Elem().Sym
+ tsym = l.Left().Type().Elem().Sym
default:
return false
}
- if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" {
+ if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" {
return false
}
return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
}
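// Examples of what isReflectHeaderDataField accepts (h, p, u hypothetical):
//
//	var h reflect.SliceHeader
//	h.Data = ...   // ODOT on a SliceHeader: matches
//	var p *reflect.StringHeader
//	p.Data = ...   // ODOTPTR through a *StringHeader: matches
//	var u uintptr
//	u = ...        // any other uintptr lvalue: does not match
//
// Matching assignments are treated specially by the assignment-walking code,
// since the uintptr-typed Data field actually carries pointer data.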
func convas(n *ir.Node, init *ir.Nodes) *ir.Node {
- if n.Op != ir.OAS {
- base.Fatalf("convas: not OAS %v", n.Op)
+ if n.Op() != ir.OAS {
+ base.Fatalf("convas: not OAS %v", n.Op())
}
defer updateHasCall(n)
n.SetTypecheck(1)
- if n.Left == nil || n.Right == nil {
+ if n.Left() == nil || n.Right() == nil {
return n
}
- lt := n.Left.Type
- rt := n.Right.Type
+ lt := n.Left().Type()
+ rt := n.Right().Type()
if lt == nil || rt == nil {
return n
}
- if ir.IsBlank(n.Left) {
- n.Right = defaultlit(n.Right, nil)
+ if ir.IsBlank(n.Left()) {
+ n.SetRight(defaultlit(n.Right(), nil))
return n
}
if !types.Identical(lt, rt) {
- n.Right = assignconv(n.Right, lt, "assignment")
- n.Right = walkexpr(n.Right, init)
+ n.SetRight(assignconv(n.Right(), lt, "assignment"))
+ n.SetRight(walkexpr(n.Right(), init))
}
- dowidth(n.Right.Type)
+ dowidth(n.Right().Type())
return n
}
var mapinit ir.Nodes
for i, n := range all {
- l := n.Left
+ l := n.Left()
// Save subexpressions needed on left side.
// Drill through non-dereferences.
for {
- if l.Op == ir.ODOT || l.Op == ir.OPAREN {
- l = l.Left
+ if l.Op() == ir.ODOT || l.Op() == ir.OPAREN {
+ l = l.Left()
continue
}
- if l.Op == ir.OINDEX && l.Left.Type.IsArray() {
- l.Right = reorder3save(l.Right, all, i, &early)
- l = l.Left
+ if l.Op() == ir.OINDEX && l.Left().Type().IsArray() {
+ l.SetRight(reorder3save(l.Right(), all, i, &early))
+ l = l.Left()
continue
}
break
}
- switch l.Op {
+ switch l.Op() {
default:
- base.Fatalf("reorder3 unexpected lvalue %#v", l.Op)
+ base.Fatalf("reorder3 unexpected lvalue %#v", l.Op())
case ir.ONAME:
break
case ir.OINDEX, ir.OINDEXMAP:
- l.Left = reorder3save(l.Left, all, i, &early)
- l.Right = reorder3save(l.Right, all, i, &early)
- if l.Op == ir.OINDEXMAP {
+ l.SetLeft(reorder3save(l.Left(), all, i, &early))
+ l.SetRight(reorder3save(l.Right(), all, i, &early))
+ if l.Op() == ir.OINDEXMAP {
all[i] = convas(all[i], &mapinit)
}
case ir.ODEREF, ir.ODOTPTR:
- l.Left = reorder3save(l.Left, all, i, &early)
+ l.SetLeft(reorder3save(l.Left(), all, i, &early))
}
// Save expression on right side.
- all[i].Right = reorder3save(all[i].Right, all, i, &early)
+ all[i].SetRight(reorder3save(all[i].Right(), all, i, &early))
}
early = append(mapinit.Slice(), early...)
return n
}
- q := temp(n.Type)
+ q := temp(n.Type())
q = ir.Nod(ir.OAS, q, n)
q = typecheck(q, ctxStmt)
*early = append(*early, q)
- return q.Left
+ return q.Left()
}
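// Illustration for outervalue, defined just below: it walks an lvalue up to
// the enclosing value that a write actually affects, e.g.
//
//	s.f.g  =>  s      // ODOT chains collapse to the containing struct
//	a[2]   =>  a      // OINDEX into an array stays within the array
//	p.f    =>  p.f    // ODOTPTR stops: the write goes through a pointer
//	sl[2]  =>  sl[2]  // OINDEX into a slice also stops, for the same reason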
// what's the outer value that a write to n affects?
// outer value means containing struct or array.
func outervalue(n *ir.Node) *ir.Node {
for {
- switch n.Op {
+ switch n.Op() {
case ir.OXDOT:
base.Fatalf("OXDOT in walk")
case ir.ODOT, ir.OPAREN, ir.OCONVNOP:
- n = n.Left
+ n = n.Left()
continue
case ir.OINDEX:
- if n.Left.Type != nil && n.Left.Type.IsArray() {
- n = n.Left
+ if n.Left().Type() != nil && n.Left().Type().IsArray() {
+ n = n.Left()
continue
}
}
// Treat all fields of a struct as referring to the whole struct.
// We could do better but we would have to keep track of the fields.
- for r.Op == ir.ODOT {
- r = r.Left
+ for r.Op() == ir.ODOT {
+ r = r.Left()
}
// Look for obvious aliasing: a variable being assigned
memwrite := false
for _, as := range all {
// We can ignore assignments to blank.
- if ir.IsBlank(as.Left) {
+ if ir.IsBlank(as.Left()) {
continue
}
- l := outervalue(as.Left)
- if l.Op != ir.ONAME {
+ l := outervalue(as.Left())
+ if l.Op() != ir.ONAME {
memwrite = true
continue
}
continue
case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
- if l.Name.Addrtaken() {
+ if l.Name().Addrtaken() {
memwrite = true
continue
}
return true
}
- switch n.Op {
+ switch n.Op() {
case ir.OLITERAL, ir.ONIL:
return true
case ir.ONAME:
switch n.Class() {
case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
- if !n.Name.Addrtaken() {
+ if !n.Name().Addrtaken() {
return true
}
}
ir.OCONVNOP,
ir.OCONVIFACE,
ir.ODOTTYPE:
- return varexpr(n.Left) && varexpr(n.Right)
+ return varexpr(n.Left()) && varexpr(n.Right())
case ir.ODOT: // but not ODOTPTR
// Should have been handled in aliased.
if r == nil {
return false
}
- switch r.Op {
+ switch r.Op() {
// match each right given left
case ir.ONAME:
return l == r
return false
}
- if vmatch2(l, r.Left) {
+ if vmatch2(l, r.Left()) {
return true
}
- if vmatch2(l, r.Right) {
+ if vmatch2(l, r.Right()) {
return true
}
- for _, n := range r.List.Slice() {
+ for _, n := range r.List().Slice() {
if vmatch2(l, n) {
return true
}
if l == nil || r == nil {
return false
}
- switch l.Op {
+ switch l.Op() {
case ir.ONAME:
switch l.Class() {
case ir.PPARAM, ir.PAUTO:
return false
}
- if vmatch1(l.Left, r) {
+ if vmatch1(l.Left(), r) {
return true
}
- if vmatch1(l.Right, r) {
+ if vmatch1(l.Right(), r) {
return true
}
- for _, n := range l.List.Slice() {
+ for _, n := range l.List().Slice() {
if vmatch1(n, r) {
return true
}
var nn []*ir.Node
for _, t := range params.Fields().Slice() {
v := ir.AsNode(t.Nname)
- if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result
+ if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result
v = nil
}
if v == nil {
continue
}
- if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil {
+ if stackcopy := v.Name().Param.Stackcopy; stackcopy != nil {
nn = append(nn, walkstmt(ir.Nod(ir.ODCL, v, nil)))
if stackcopy.Class() == ir.PPARAM {
nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, v, stackcopy), ctxStmt)))
// even allocations to move params/results to the heap.
// The generated code is added to Curfn's Enter list.
func zeroResults() {
- for _, f := range Curfn.Type.Results().Fields().Slice() {
+ for _, f := range Curfn.Type().Results().Fields().Slice() {
v := ir.AsNode(f.Nname)
- if v != nil && v.Name.Param.Heapaddr != nil {
+ if v != nil && v.Name().Param.Heapaddr != nil {
// The local which points to the return value is the
// thing that needs zeroing. This is already handled
// by a Needzero annotation in plive.go:livenessepilogue.
// I don't think the zeroing below matters.
// The stack return value will never be marked as live anywhere in the function.
// It is not written to until deferreturn returns.
- v = v.Name.Param.Stackcopy
+ v = v.Name().Param.Stackcopy
}
// Zero the stack location containing f.
- Curfn.Func.Enter.Append(ir.NodAt(Curfn.Pos, ir.OAS, v, nil))
+ Curfn.Func().Enter.Append(ir.NodAt(Curfn.Pos(), ir.OAS, v, nil))
}
}
if v == nil {
continue
}
- if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT {
+ if stackcopy := v.Name().Param.Stackcopy; stackcopy != nil && stackcopy.Class() == ir.PPARAMOUT {
nn = append(nn, walkstmt(typecheck(ir.Nod(ir.OAS, stackcopy, v), ctxStmt)))
}
}
// Enter and Exit lists.
func heapmoves() {
lno := base.Pos
- base.Pos = Curfn.Pos
- nn := paramstoheap(Curfn.Type.Recvs())
- nn = append(nn, paramstoheap(Curfn.Type.Params())...)
- nn = append(nn, paramstoheap(Curfn.Type.Results())...)
- Curfn.Func.Enter.Append(nn...)
- base.Pos = Curfn.Func.Endlineno
- Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...)
+ base.Pos = Curfn.Pos()
+ nn := paramstoheap(Curfn.Type().Recvs())
+ nn = append(nn, paramstoheap(Curfn.Type().Params())...)
+ nn = append(nn, paramstoheap(Curfn.Type().Results())...)
+ Curfn.Func().Enter.Append(nn...)
+ base.Pos = Curfn.Func().Endlineno
+ Curfn.Func().Exit.Append(returnsfromheap(Curfn.Type().Results())...)
base.Pos = lno
}
func vmkcall(fn *ir.Node, t *types.Type, init *ir.Nodes, va []*ir.Node) *ir.Node {
- if fn.Type == nil || fn.Type.Etype != types.TFUNC {
- base.Fatalf("mkcall %v %v", fn, fn.Type)
+ if fn.Type() == nil || fn.Type().Etype != types.TFUNC {
+ base.Fatalf("mkcall %v %v", fn, fn.Type())
}
- n := fn.Type.NumParams()
+ n := fn.Type().NumParams()
if n != len(va) {
base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
}
r := ir.Nod(ir.OCALL, fn, nil)
- r.List.Set(va)
- if fn.Type.NumResults() > 0 {
+ r.PtrList().Set(va)
+ if fn.Type().NumResults() > 0 {
r = typecheck(r, ctxExpr|ctxMultiOK)
} else {
r = typecheck(r, ctxStmt)
}
r = walkexpr(r, init)
- r.Type = t
+ r.SetType(t)
return r
}
}
func conv(n *ir.Node, t *types.Type) *ir.Node {
- if types.Identical(n.Type, t) {
+ if types.Identical(n.Type(), t) {
return n
}
n = ir.Nod(ir.OCONV, n, nil)
- n.Type = t
+ n.SetType(t)
n = typecheck(n, ctxExpr)
return n
}
// convnop converts node n to type t using the OCONVNOP op
// and typechecks the result with ctxExpr.
func convnop(n *ir.Node, t *types.Type) *ir.Node {
- if types.Identical(n.Type, t) {
+ if types.Identical(n.Type(), t) {
return n
}
n = ir.Nod(ir.OCONVNOP, n, nil)
- n.Type = t
+ n.SetType(t)
n = typecheck(n, ctxExpr)
return n
}
// While converting from int8 to int is possible, it would yield
// the wrong result for negative values.
// Reinterpreting the value as an unsigned byte solves both cases.
- if !types.Identical(n.Type, types.Types[types.TUINT8]) {
+ if !types.Identical(n.Type(), types.Types[types.TUINT8]) {
n = ir.Nod(ir.OCONV, n, nil)
- n.Type = types.Types[types.TUINT8]
+ n.SetType(types.Types[types.TUINT8])
n.SetTypecheck(1)
}
n = ir.Nod(ir.OCONV, n, nil)
- n.Type = types.Types[types.TINT]
+ n.SetType(types.Types[types.TINT])
n.SetTypecheck(1)
return n
}
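// Worked example for the two-step conversion above, with n holding an int8
// of value -1 (bit pattern 0xFF):
//
//	int(int8(-1))         == -1   // direct conversion sign-extends
//	int(uint8(int8(-1)))  == 255  // reinterpret the byte as unsigned first
//
// The extra OCONV through TUINT8 produces the second behavior whenever n is
// not already a uint8.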
func addstr(n *ir.Node, init *ir.Nodes) *ir.Node {
// order.expr rewrote OADDSTR to have a list of strings.
- c := n.List.Len()
+ c := n.List().Len()
if c < 2 {
base.Fatalf("addstr count %d too small", c)
}
buf := nodnil()
- if n.Esc == EscNone {
+ if n.Esc() == EscNone {
sz := int64(0)
- for _, n1 := range n.List.Slice() {
- if n1.Op == ir.OLITERAL {
+ for _, n1 := range n.List().Slice() {
+ if n1.Op() == ir.OLITERAL {
sz += int64(len(n1.StringVal()))
}
}
// build list of string arguments
args := []*ir.Node{buf}
- for _, n2 := range n.List.Slice() {
+ for _, n2 := range n.List().Slice() {
args = append(args, conv(n2, types.Types[types.TSTRING]))
}
if prealloc[n] != nil {
prealloc[slice] = prealloc[n]
}
- slice.List.Set(args[1:]) // skip buf arg
+ slice.PtrList().Set(args[1:]) // skip buf arg
args = []*ir.Node{buf, slice}
- slice.Esc = EscNone
+ slice.SetEsc(EscNone)
}
cat := syslook(fn)
r := ir.Nod(ir.OCALL, cat, nil)
- r.List.Set(args)
+ r.PtrList().Set(args)
r = typecheck(r, ctxExpr)
r = walkexpr(r, init)
- r.Type = n.Type
+ r.SetType(n.Type())
return r
}
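// Sketch of the concatenation lowering (helper names are the runtime's
// concatstring family; the count-based choice of fn happens in elided code):
//
//	s := a + b + c
//
// becomes roughly
//
//	s = concatstring3(buf, a, b, c)  // buf: *[32]byte stack buffer when the
//	                                 // result doesn't escape, nil otherwise
//
// and with more than five operands the strings are passed as one []string
// (the slice assembled above) to concatstrings(buf, slice).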
func walkAppendArgs(n *ir.Node, init *ir.Nodes) {
- walkexprlistsafe(n.List.Slice(), init)
+ walkexprlistsafe(n.List().Slice(), init)
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
// modifying here. Fix explicitly.
- ls := n.List.Slice()
+ ls := n.List().Slice()
for i1, n1 := range ls {
ls[i1] = cheapexpr(n1, init)
}
func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node {
walkAppendArgs(n, init)
- l1 := n.List.First()
- l2 := n.List.Second()
+ l1 := n.List().First()
+ l2 := n.List().Second()
l2 = cheapexpr(l2, init)
- n.List.SetSecond(l2)
+ n.List().SetSecond(l2)
var nodes ir.Nodes
// var s []T
- s := temp(l1.Type)
+ s := temp(l1.Type())
nodes.Append(ir.Nod(ir.OAS, s, l1)) // s = l1
- elemtype := s.Type.Elem()
+ elemtype := s.Type().Elem()
// n := len(s) + len(l2)
nn := temp(types.Types[types.TINT])
nif := ir.Nod(ir.OIF, nil, nil)
nuint := conv(nn, types.Types[types.TUINT])
scapuint := conv(ir.Nod(ir.OCAP, s, nil), types.Types[types.TUINT])
- nif.Left = ir.Nod(ir.OGT, nuint, scapuint)
+ nif.SetLeft(ir.Nod(ir.OGT, nuint, scapuint))
// instantiate growslice(typ *type, []any, int) []any
fn := syslook("growslice")
fn = substArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
- nif.Nbody.Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn)))
nodes.Append(nif)
// s = s[:n]
if elemtype.HasPointers() {
// copy(s[len(l1):], l2)
nptr1 := ir.Nod(ir.OSLICE, s, nil)
- nptr1.Type = s.Type
+ nptr1.SetType(s.Type())
nptr1.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil)
nptr1 = cheapexpr(nptr1, &nodes)
nptr2 := l2
- Curfn.Func.SetWBPos(n.Pos)
+ Curfn.Func().SetWBPos(n.Pos())
// instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
fn := syslook("typedslicecopy")
- fn = substArgTypes(fn, l1.Type.Elem(), l2.Type.Elem())
+ fn = substArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
ptr1, len1 := backingArrayPtrLen(nptr1)
ptr2, len2 := backingArrayPtrLen(nptr2)
ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
// copy(s[len(l1):], l2)
// l2 can be a slice or string.
nptr1 := ir.Nod(ir.OSLICE, s, nil)
- nptr1.Type = s.Type
+ nptr1.SetType(s.Type())
nptr1.SetSliceBounds(ir.Nod(ir.OLEN, l1, nil), nil, nil)
nptr1 = cheapexpr(nptr1, &nodes)
nptr2 := l2
ptr2, len2 := backingArrayPtrLen(nptr2)
fn := syslook("slicecopy")
- fn = substArgTypes(fn, ptr1.Type.Elem(), ptr2.Type.Elem())
+ fn = substArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem())
ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width))
} else {
// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
base.Fatalf("missing typecheck: %+v", n)
}
- if n.Op != ir.OAPPEND || !n.IsDDD() || n.List.Len() != 2 {
+ if n.Op() != ir.OAPPEND || !n.IsDDD() || n.List().Len() != 2 {
return false
}
- second := n.List.Second()
- if second.Op != ir.OMAKESLICE || second.Right != nil {
+ second := n.List().Second()
+ if second.Op() != ir.OMAKESLICE || second.Right() != nil {
return false
}
// typecheck made sure that constant arguments to make are not negative and fit into an int.
// Overflow of the len argument to make is handled by an explicit check of int(len) < 0 at runtime.
- y := second.Left
- if !ir.IsConst(y, constant.Int) && y.Type.Size() > types.Types[types.TUINT].Size() {
+ y := second.Left()
+ if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() {
return false
}
// isAppendOfMake made sure all possible positive values of l2 fit into an uint.
// The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
// check of l2 < 0 at runtime which is generated below.
- l2 := conv(n.List.Second().Left, types.Types[types.TINT])
+ l2 := conv(n.List().Second().Left(), types.Types[types.TINT])
l2 = typecheck(l2, ctxExpr)
- n.List.SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second().
+ n.List().SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second().
walkAppendArgs(n, init)
- l1 := n.List.First()
- l2 = n.List.Second() // re-read l2, as it may have been updated by walkAppendArgs
+ l1 := n.List().First()
+ l2 = n.List().Second() // re-read l2, as it may have been updated by walkAppendArgs
var nodes []*ir.Node
nifneg.SetLikely(true)
// else panicmakeslicelen()
- nifneg.Rlist.Set1(mkcall("panicmakeslicelen", nil, init))
+ nifneg.PtrRlist().Set1(mkcall("panicmakeslicelen", nil, init))
nodes = append(nodes, nifneg)
// s := l1
- s := temp(l1.Type)
+ s := temp(l1.Type())
nodes = append(nodes, ir.Nod(ir.OAS, s, l1))
- elemtype := s.Type.Elem()
+ elemtype := s.Type().Elem()
// n := len(s) + l2
nn := temp(types.Types[types.TINT])
fn = substArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
- nif.Nbody.Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
+ nif.PtrBody().Set1(ir.Nod(ir.OAS, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn)))
nodes = append(nodes, nif)
// s = s[:n]
nodes = append(nodes, ir.Nod(ir.OAS, s, nt))
// lptr := &l1[0]
- l1ptr := temp(l1.Type.Elem().PtrTo())
+ l1ptr := temp(l1.Type().Elem().PtrTo())
tmp := ir.Nod(ir.OSPTR, l1, nil)
nodes = append(nodes, ir.Nod(ir.OAS, l1ptr, tmp))
hasPointers := elemtype.HasPointers()
if hasPointers {
clrname = "memclrHasPointers"
- Curfn.Func.SetWBPos(n.Pos)
+ Curfn.Func().SetWBPos(n.Pos())
}
var clr ir.Nodes
if hasPointers {
// if l1ptr == sptr
nifclr := ir.Nod(ir.OIF, ir.Nod(ir.OEQ, l1ptr, sptr), nil)
- nifclr.Nbody = clr
+ nifclr.SetBody(clr)
nodes = append(nodes, nifclr)
} else {
nodes = append(nodes, clr.Slice()...)
// }
// s
func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node {
- if !samesafeexpr(dst, n.List.First()) {
- n.List.SetFirst(safeexpr(n.List.First(), init))
- n.List.SetFirst(walkexpr(n.List.First(), init))
+ if !samesafeexpr(dst, n.List().First()) {
+ n.List().SetFirst(safeexpr(n.List().First(), init))
+ n.List().SetFirst(walkexpr(n.List().First(), init))
}
- walkexprlistsafe(n.List.Slice()[1:], init)
+ walkexprlistsafe(n.List().Slice()[1:], init)
- nsrc := n.List.First()
+ nsrc := n.List().First()
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
// Using cheapexpr also makes sure that the evaluation
// of all arguments (and especially any panics) happen
// before we begin to modify the slice in a visible way.
- ls := n.List.Slice()[1:]
+ ls := n.List().Slice()[1:]
for i, n := range ls {
n = cheapexpr(n, init)
- if !types.Identical(n.Type, nsrc.Type.Elem()) {
- n = assignconv(n, nsrc.Type.Elem(), "append")
+ if !types.Identical(n.Type(), nsrc.Type().Elem()) {
+ n = assignconv(n, nsrc.Type().Elem(), "append")
n = walkexpr(n, init)
}
ls[i] = n
}
- argc := n.List.Len() - 1
+ argc := n.List().Len() - 1
if argc < 1 {
return nsrc
}
var l []*ir.Node
- ns := temp(nsrc.Type)
+ ns := temp(nsrc.Type())
l = append(l, ir.Nod(ir.OAS, ns, nsrc)) // s = src
na := nodintconst(int64(argc)) // const argc
nx := ir.Nod(ir.OIF, nil, nil) // if cap(s) - len(s) < argc
- nx.Left = ir.Nod(ir.OLT, ir.Nod(ir.OSUB, ir.Nod(ir.OCAP, ns, nil), ir.Nod(ir.OLEN, ns, nil)), na)
+ nx.SetLeft(ir.Nod(ir.OLT, ir.Nod(ir.OSUB, ir.Nod(ir.OCAP, ns, nil), ir.Nod(ir.OLEN, ns, nil)), na))
fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
- fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem())
+ fn = substArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
- nx.Nbody.Set1(ir.Nod(ir.OAS, ns,
- mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns,
+ nx.PtrBody().Set1(ir.Nod(ir.OAS, ns,
+ mkcall1(fn, ns.Type(), nx.PtrInit(), typename(ns.Type().Elem()), ns,
ir.Nod(ir.OADD, ir.Nod(ir.OLEN, ns, nil), na))))
l = append(l, nx)
nx.SetBounded(true)
l = append(l, ir.Nod(ir.OAS, ns, nx)) // s = s[:n+argc]
- ls = n.List.Slice()[1:]
+ ls = n.List().Slice()[1:]
for i, n := range ls {
nx = ir.Nod(ir.OINDEX, ns, nn) // s[n] ...
nx.SetBounded(true)
// Lower copy(a, b) to a memmove call or a runtime call to slicecopy.
// Also works if b is a string.
//
func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node {
- if n.Left.Type.Elem().HasPointers() {
- Curfn.Func.SetWBPos(n.Pos)
- fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem())
- n.Left = cheapexpr(n.Left, init)
- ptrL, lenL := backingArrayPtrLen(n.Left)
- n.Right = cheapexpr(n.Right, init)
- ptrR, lenR := backingArrayPtrLen(n.Right)
- return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), ptrL, lenL, ptrR, lenR)
+ if n.Left().Type().Elem().HasPointers() {
+ Curfn.Func().SetWBPos(n.Pos())
+ fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem())
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptrL, lenL := backingArrayPtrLen(n.Left())
+ n.SetRight(cheapexpr(n.Right(), init))
+ ptrR, lenR := backingArrayPtrLen(n.Right())
+ return mkcall1(fn, n.Type(), init, typename(n.Left().Type().Elem()), ptrL, lenL, ptrR, lenR)
}
if runtimecall {
// copy(n.Left, n.Right)
// n.Right can be a slice or string.
- n.Left = cheapexpr(n.Left, init)
- ptrL, lenL := backingArrayPtrLen(n.Left)
- n.Right = cheapexpr(n.Right, init)
- ptrR, lenR := backingArrayPtrLen(n.Right)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ ptrL, lenL := backingArrayPtrLen(n.Left())
+ n.SetRight(cheapexpr(n.Right(), init))
+ ptrR, lenR := backingArrayPtrLen(n.Right())
fn := syslook("slicecopy")
- fn = substArgTypes(fn, ptrL.Type.Elem(), ptrR.Type.Elem())
+ fn = substArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())
- return mkcall1(fn, n.Type, init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left.Type.Elem().Width))
+ return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left().Type().Elem().Width))
}
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
- nl := temp(n.Left.Type)
- nr := temp(n.Right.Type)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
+ nl := temp(n.Left().Type())
+ nr := temp(n.Right().Type())
var l []*ir.Node
- l = append(l, ir.Nod(ir.OAS, nl, n.Left))
- l = append(l, ir.Nod(ir.OAS, nr, n.Right))
+ l = append(l, ir.Nod(ir.OAS, nl, n.Left()))
+ l = append(l, ir.Nod(ir.OAS, nr, n.Right()))
nfrm := ir.Nod(ir.OSPTR, nr, nil)
nto := ir.Nod(ir.OSPTR, nl, nil)
// if n > len(frm) { n = len(frm) }
nif := ir.Nod(ir.OIF, nil, nil)
- nif.Left = ir.Nod(ir.OGT, nlen, ir.Nod(ir.OLEN, nr, nil))
- nif.Nbody.Append(ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nr, nil)))
+ nif.SetLeft(ir.Nod(ir.OGT, nlen, ir.Nod(ir.OLEN, nr, nil)))
+ nif.PtrBody().Append(ir.Nod(ir.OAS, nlen, ir.Nod(ir.OLEN, nr, nil)))
l = append(l, nif)
// if to.ptr != frm.ptr { memmove( ... ) }
l = append(l, ne)
fn := syslook("memmove")
- fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem())
+ fn = substArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
nwid := temp(types.Types[types.TUINTPTR])
setwid := ir.Nod(ir.OAS, nwid, conv(nlen, types.Types[types.TUINTPTR]))
- ne.Nbody.Append(setwid)
- nwid = ir.Nod(ir.OMUL, nwid, nodintconst(nl.Type.Elem().Width))
+ ne.PtrBody().Append(setwid)
+ nwid = ir.Nod(ir.OMUL, nwid, nodintconst(nl.Type().Elem().Width))
call := mkcall1(fn, nil, init, nto, nfrm, nwid)
- ne.Nbody.Append(call)
+ ne.PtrBody().Append(call)
typecheckslice(l, ctxStmt)
walkstmtlist(l)
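// Sketch of the open-coded path above (no runtime call, pointer-free
// elements), for dst, src []T:
//
//	n := len(dst)
//	if n > len(src) {
//		n = len(src)
//	}
//	if dst.ptr != src.ptr {
//		memmove(dst.ptr, src.ptr, uintptr(n)*sizeof(T))
//	}
//	// n is the result of the copy expression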
sym := typesymprefix(".eq", t)
n := NewName(sym)
setNodeNameFunc(n)
- n.Type = functype(nil, []*ir.Node{
+ n.SetType(functype(nil, []*ir.Node{
anonfield(types.NewPtr(t)),
anonfield(types.NewPtr(t)),
}, []*ir.Node{
anonfield(types.Types[types.TBOOL]),
- })
+ }))
return n, false
}
base.Fatalf("eqfor %v", t)
// The result of walkcompare MUST be assigned back to n, e.g.
// n.SetLeft(walkcompare(n.Left(), init))
func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node {
- if n.Left.Type.IsInterface() && n.Right.Type.IsInterface() && n.Left.Op != ir.ONIL && n.Right.Op != ir.ONIL {
+ if n.Left().Type().IsInterface() && n.Right().Type().IsInterface() && n.Left().Op() != ir.ONIL && n.Right().Op() != ir.ONIL {
return walkcompareInterface(n, init)
}
- if n.Left.Type.IsString() && n.Right.Type.IsString() {
+ if n.Left().Type().IsString() && n.Right().Type().IsString() {
return walkcompareString(n, init)
}
- n.Left = walkexpr(n.Left, init)
- n.Right = walkexpr(n.Right, init)
+ n.SetLeft(walkexpr(n.Left(), init))
+ n.SetRight(walkexpr(n.Right(), init))
// Given mixed interface/concrete comparison,
// rewrite into types-equal && data-equal.
// This is efficient, avoids allocations, and avoids runtime calls.
- if n.Left.Type.IsInterface() != n.Right.Type.IsInterface() {
+ if n.Left().Type().IsInterface() != n.Right().Type().IsInterface() {
// Preserve side-effects in case of short-circuiting; see #32187.
- l := cheapexpr(n.Left, init)
- r := cheapexpr(n.Right, init)
+ l := cheapexpr(n.Left(), init)
+ r := cheapexpr(n.Right(), init)
// Swap so that l is the interface value and r is the concrete value.
- if n.Right.Type.IsInterface() {
+ if n.Right().Type().IsInterface() {
l, r = r, l
}
// Handle both == and !=.
- eq := n.Op
+ eq := n.Op()
andor := ir.OOROR
if eq == ir.OEQ {
andor = ir.OANDAND
// l.tab != nil && l.tab._type == type(r)
var eqtype *ir.Node
tab := ir.Nod(ir.OITAB, l, nil)
- rtyp := typename(r.Type)
- if l.Type.IsEmptyInterface() {
- tab.Type = types.NewPtr(types.Types[types.TUINT8])
+ rtyp := typename(r.Type())
+ if l.Type().IsEmptyInterface() {
+ tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
tab.SetTypecheck(1)
eqtype = ir.Nod(eq, tab, rtyp)
} else {
eqtype = ir.Nod(andor, nonnil, match)
}
// Check for data equal.
- eqdata := ir.Nod(eq, ifaceData(n.Pos, l, r.Type), r)
+ eqdata := ir.Nod(eq, ifaceData(n.Pos(), l, r.Type()), r)
// Put it all together.
expr := ir.Nod(andor, eqtype, eqdata)
n = finishcompare(n, expr, init)
// Otherwise back end handles it.
// While we're here, decide whether to
// inline or call an eq alg.
- t := n.Left.Type
+ t := n.Left().Type()
var inline bool
maxcmpsize := int64(4)
switch t.Etype {
default:
if base.Debug.Libfuzzer != 0 && t.IsInteger() {
- n.Left = cheapexpr(n.Left, init)
- n.Right = cheapexpr(n.Right, init)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ n.SetRight(cheapexpr(n.Right(), init))
// If exactly one comparison operand is
// constant, invoke the constcmp functions
// instead, and arrange for the constant
// operand to be the first argument.
- l, r := n.Left, n.Right
- if r.Op == ir.OLITERAL {
+ l, r := n.Left(), n.Right()
+ if r.Op() == ir.OLITERAL {
l, r = r, l
}
- constcmp := l.Op == ir.OLITERAL && r.Op != ir.OLITERAL
+ constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL
var fn string
var paramType *types.Type
inline = t.NumComponents(types.IgnoreBlankFields) <= 4
}
- cmpl := n.Left
- for cmpl != nil && cmpl.Op == ir.OCONVNOP {
- cmpl = cmpl.Left
+ cmpl := n.Left()
+ for cmpl != nil && cmpl.Op() == ir.OCONVNOP {
+ cmpl = cmpl.Left()
}
- cmpr := n.Right
- for cmpr != nil && cmpr.Op == ir.OCONVNOP {
- cmpr = cmpr.Left
+ cmpr := n.Right()
+ for cmpr != nil && cmpr.Op() == ir.OCONVNOP {
+ cmpr = cmpr.Left()
}
// Chose not to inline. Call equality function directly.
fn, needsize := eqfor(t)
call := ir.Nod(ir.OCALL, fn, nil)
- call.List.Append(ir.Nod(ir.OADDR, cmpl, nil))
- call.List.Append(ir.Nod(ir.OADDR, cmpr, nil))
+ call.PtrList().Append(ir.Nod(ir.OADDR, cmpl, nil))
+ call.PtrList().Append(ir.Nod(ir.OADDR, cmpr, nil))
if needsize {
- call.List.Append(nodintconst(t.Width))
+ call.PtrList().Append(nodintconst(t.Width))
}
res := call
- if n.Op != ir.OEQ {
+ if n.Op() != ir.OEQ {
res = ir.Nod(ir.ONOT, res, nil)
}
n = finishcompare(n, res, init)
// inline: build boolean expression comparing element by element
andor := ir.OANDAND
- if n.Op == ir.ONE {
+ if n.Op() == ir.ONE {
andor = ir.OOROR
}
var expr *ir.Node
compare := func(el, er *ir.Node) {
- a := ir.Nod(n.Op, el, er)
+ a := ir.Nod(n.Op(), el, er)
if expr == nil {
expr = a
} else {
}
}
if expr == nil {
- expr = nodbool(n.Op == ir.OEQ)
+ expr = nodbool(n.Op() == ir.OEQ)
// We still need to use cmpl and cmpr, in case they contain
// an expression which might panic. See issue 23837.
- t := temp(cmpl.Type)
+ t := temp(cmpl.Type())
a1 := ir.Nod(ir.OAS, t, cmpl)
a1 = typecheck(a1, ctxStmt)
a2 := ir.Nod(ir.OAS, t, cmpr)
func tracecmpArg(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node {
// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
- if n.Op == ir.OLITERAL && n.Type.IsSigned() && n.Int64Val() < 0 {
- n = copyexpr(n, n.Type, init)
+ if n.Op() == ir.OLITERAL && n.Type().IsSigned() && n.Int64Val() < 0 {
+ n = copyexpr(n, n.Type(), init)
}
return conv(n, t)
}
func walkcompareInterface(n *ir.Node, init *ir.Nodes) *ir.Node {
- n.Right = cheapexpr(n.Right, init)
- n.Left = cheapexpr(n.Left, init)
- eqtab, eqdata := eqinterface(n.Left, n.Right)
+ n.SetRight(cheapexpr(n.Right(), init))
+ n.SetLeft(cheapexpr(n.Left(), init))
+ eqtab, eqdata := eqinterface(n.Left(), n.Right())
var cmp *ir.Node
- if n.Op == ir.OEQ {
+ if n.Op() == ir.OEQ {
cmp = ir.Nod(ir.OANDAND, eqtab, eqdata)
} else {
- eqtab.Op = ir.ONE
+ eqtab.SetOp(ir.ONE)
cmp = ir.Nod(ir.OOROR, eqtab, ir.Nod(ir.ONOT, eqdata, nil))
}
return finishcompare(n, cmp, init)
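// Schematic form of the interface comparison built above, where eqtab and
// eqdata come from eqinterface:
//
//	i == j  =>  i.tab == j.tab && runtime.ifaceeq(i.tab, i.data, j.data)
//	i != j  =>  i.tab != j.tab || !runtime.ifaceeq(i.tab, i.data, j.data)
//
// (efaceeq is used instead for empty interfaces; field names are schematic.)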
// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
var cs, ncs *ir.Node // const string, non-const string
switch {
- case ir.IsConst(n.Left, constant.String) && ir.IsConst(n.Right, constant.String):
+ case ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.String):
// ignore; will be constant evaluated
- case ir.IsConst(n.Left, constant.String):
- cs = n.Left
- ncs = n.Right
- case ir.IsConst(n.Right, constant.String):
- cs = n.Right
- ncs = n.Left
+ case ir.IsConst(n.Left(), constant.String):
+ cs = n.Left()
+ ncs = n.Right()
+ case ir.IsConst(n.Right(), constant.String):
+ cs = n.Right()
+ ncs = n.Left()
}
if cs != nil {
- cmp := n.Op
+ cmp := n.Op()
// Our comparison below assumes that the non-constant string
// is on the left hand side, so rewrite "" cmp x to x cmp "".
// See issue 24817.
- if ir.IsConst(n.Left, constant.String) {
+ if ir.IsConst(n.Left(), constant.String) {
cmp = brrev(cmp)
}
}
var r *ir.Node
- if n.Op == ir.OEQ || n.Op == ir.ONE {
+ if n.Op() == ir.OEQ || n.Op() == ir.ONE {
// prepare for rewrite below
- n.Left = cheapexpr(n.Left, init)
- n.Right = cheapexpr(n.Right, init)
- eqlen, eqmem := eqstring(n.Left, n.Right)
+ n.SetLeft(cheapexpr(n.Left(), init))
+ n.SetRight(cheapexpr(n.Right(), init))
+ eqlen, eqmem := eqstring(n.Left(), n.Right())
// quick check of len before full compare for == or !=.
// memequal then tests equality up to length len.
- if n.Op == ir.OEQ {
+ if n.Op() == ir.OEQ {
// len(left) == len(right) && memequal(left, right, len)
r = ir.Nod(ir.OANDAND, eqlen, eqmem)
} else {
// len(left) != len(right) || !memequal(left, right, len)
- eqlen.Op = ir.ONE
+ eqlen.SetOp(ir.ONE)
r = ir.Nod(ir.OOROR, eqlen, ir.Nod(ir.ONOT, eqmem, nil))
}
} else {
// sys_cmpstring(s1, s2) :: 0
- r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.Left, types.Types[types.TSTRING]), conv(n.Right, types.Types[types.TSTRING]))
- r = ir.Nod(n.Op, r, nodintconst(0))
+ r = mkcall("cmpstring", types.Types[types.TINT], init, conv(n.Left(), types.Types[types.TSTRING]), conv(n.Right(), types.Types[types.TSTRING]))
+ r = ir.Nod(n.Op(), r, nodintconst(0))
}
return finishcompare(n, r, init)
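// Examples of the string comparison rewrites above, for a non-constant s
// (pointer/length access shown schematically):
//
//	s == "ab"  =>  len(s) == 2 && memequal(s.ptr, "ab".ptr, 2)
//	s != "ab"  =>  len(s) != 2 || !memequal(s.ptr, "ab".ptr, 2)
//	s < t      =>  cmpstring(s, t) < 0
//
// The length test is cheap and short-circuits the memequal call.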
// n.SetLeft(finishcompare(n.Left(), r, init))
func finishcompare(n, r *ir.Node, init *ir.Nodes) *ir.Node {
r = typecheck(r, ctxExpr)
- r = conv(r, n.Type)
+ r = conv(r, n.Type())
r = walkexpr(r, init)
return r
}
// bounded reports whether integer n must be in range [0, max).
func bounded(n *ir.Node, max int64) bool {
- if n.Type == nil || !n.Type.IsInteger() {
+ if n.Type() == nil || !n.Type().IsInteger() {
return false
}
- sign := n.Type.IsSigned()
- bits := int32(8 * n.Type.Width)
+ sign := n.Type().IsSigned()
+ bits := int32(8 * n.Type().Width)
if smallintconst(n) {
v := n.Int64Val()
return 0 <= v && v < max
}
- switch n.Op {
+ switch n.Op() {
case ir.OAND, ir.OANDNOT:
v := int64(-1)
switch {
- case smallintconst(n.Left):
- v = n.Left.Int64Val()
- case smallintconst(n.Right):
- v = n.Right.Int64Val()
- if n.Op == ir.OANDNOT {
+ case smallintconst(n.Left()):
+ v = n.Left().Int64Val()
+ case smallintconst(n.Right()):
+ v = n.Right().Int64Val()
+ if n.Op() == ir.OANDNOT {
v = ^v
if !sign {
v &= 1<<uint(bits) - 1
}
case ir.OMOD:
- if !sign && smallintconst(n.Right) {
- v := n.Right.Int64Val()
+ if !sign && smallintconst(n.Right()) {
+ v := n.Right().Int64Val()
if 0 <= v && v <= max {
return true
}
}
case ir.ODIV:
- if !sign && smallintconst(n.Right) {
- v := n.Right.Int64Val()
+ if !sign && smallintconst(n.Right()) {
+ v := n.Right().Int64Val()
for bits > 0 && v >= 2 {
bits--
v >>= 1
}
case ir.ORSH:
- if !sign && smallintconst(n.Right) {
- v := n.Right.Int64Val()
+ if !sign && smallintconst(n.Right()) {
+ v := n.Right().Int64Val()
if v > int64(bits) {
return true
}
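// Examples of bounds bounded can prove, taking max = 16 and n unsigned:
//
//	n & 15   // true: masking confines the value to [0, 16)
//	n % 16   // true: unsigned mod 16 lands in [0, 16)
//	n >> 28  // true for 32-bit n: at most 4 significant bits remain
//
// Signed operands are rejected in most cases, since negative values would
// fall outside [0, max).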
// usemethod checks interface method calls for uses of reflect.Type.Method.
func usemethod(n *ir.Node) {
- t := n.Left.Type
+ t := n.Left().Type()
// Looking for either of:
// Method(int) reflect.Method
// (including global variables such as numImports - was issue #19028).
// Also need to check for reflect package itself (see Issue #38515).
if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) {
- Curfn.Func.SetReflectMethod(true)
+ Curfn.Func().SetReflectMethod(true)
// The LSym is initialized at this point. We need to set the attribute on the LSym.
- Curfn.Func.LSym.Set(obj.AttrReflectMethod, true)
+ Curfn.Func().LSym.Set(obj.AttrReflectMethod, true)
}
}
return
}
- switch n.Op {
+ switch n.Op() {
default:
- base.Fatalf("usefield %v", n.Op)
+ base.Fatalf("usefield %v", n.Op())
case ir.ODOT, ir.ODOTPTR:
break
}
- if n.Sym == nil {
+ if n.Sym() == nil {
// No field name. This DOTPTR was built by the compiler for access
// to runtime data structures. Ignore.
return
}
- t := n.Left.Type
+ t := n.Left().Type()
if t.IsPtr() {
t = t.Elem()
}
field := n.Opt().(*types.Field)
if field == nil {
- base.Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym)
+ base.Fatalf("usefield %v %v without paramfld", n.Left().Type(), n.Sym())
}
- if field.Sym != n.Sym || field.Offset != n.Xoffset {
- base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym, n.Xoffset)
+ if field.Sym != n.Sym() || field.Offset != n.Offset() {
+ base.Fatalf("field inconsistency: %v,%v != %v,%v", field.Sym, field.Offset, n.Sym(), n.Offset())
}
if !strings.Contains(field.Note, "go:\"track\"") {
return
}
- outer := n.Left.Type
+ outer := n.Left().Type()
if outer.IsPtr() {
outer = outer.Elem()
}
}
sym := tracksym(outer, field)
- if Curfn.Func.FieldTrack == nil {
- Curfn.Func.FieldTrack = make(map[*types.Sym]struct{})
+ if Curfn.Func().FieldTrack == nil {
+ Curfn.Func().FieldTrack = make(map[*types.Sym]struct{})
}
- Curfn.Func.FieldTrack[sym] = struct{}{}
+ Curfn.Func().FieldTrack[sym] = struct{}{}
}
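// Background for the field-tracking code above (a sketch; the feature is
// gated behind the compiler's fieldtrack experiment): a field annotated as
//
//	type T struct {
//		F int `go:"track"`
//	}
//
// has every ODOT/ODOTPTR use recorded in Curfn.Func().FieldTrack, and those
// per-function sets are later emitted so the linker can report which tracked
// fields are reachable.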
func candiscardlist(l ir.Nodes) bool {
return true
}
- switch n.Op {
+ switch n.Op() {
default:
return false
// Discardable as long as we know it's not division by zero.
case ir.ODIV, ir.OMOD:
- if n.Right.Op == ir.OLITERAL && constant.Sign(n.Right.Val()) != 0 {
+ if n.Right().Op() == ir.OLITERAL && constant.Sign(n.Right().Val()) != 0 {
break
}
return false
// Discardable as long as we know it won't fail because of a bad size.
case ir.OMAKECHAN, ir.OMAKEMAP:
- if ir.IsConst(n.Left, constant.Int) && constant.Sign(n.Left.Val()) == 0 {
+ if ir.IsConst(n.Left(), constant.Int) && constant.Sign(n.Left().Val()) == 0 {
break
}
return false
return false
}
- if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
+ if !candiscard(n.Left()) || !candiscard(n.Right()) || !candiscardlist(n.Init()) || !candiscardlist(n.Body()) || !candiscardlist(n.List()) || !candiscardlist(n.Rlist()) {
return false
}
// The result of wrapCall MUST be assigned back to n, e.g.
// n.SetLeft(wrapCall(n.Left(), init))
func wrapCall(n *ir.Node, init *ir.Nodes) *ir.Node {
- if n.Ninit.Len() != 0 {
- walkstmtlist(n.Ninit.Slice())
- init.AppendNodes(&n.Ninit)
+ if n.Init().Len() != 0 {
+ walkstmtlist(n.Init().Slice())
+ init.AppendNodes(n.PtrInit())
}
- isBuiltinCall := n.Op != ir.OCALLFUNC && n.Op != ir.OCALLMETH && n.Op != ir.OCALLINTER
+ isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER
// Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
if !isBuiltinCall && n.IsDDD() {
- last := n.List.Len() - 1
- if va := n.List.Index(last); va.Op == ir.OSLICELIT {
- n.List.Set(append(n.List.Slice()[:last], va.List.Slice()...))
+ last := n.List().Len() - 1
+ if va := n.List().Index(last); va.Op() == ir.OSLICELIT {
+ n.PtrList().Set(append(n.List().Slice()[:last], va.List().Slice()...))
n.SetIsDDD(false)
}
}
// origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
- origArgs := make([]*ir.Node, n.List.Len())
+ origArgs := make([]*ir.Node, n.List().Len())
t := ir.Nod(ir.OTFUNC, nil, nil)
- for i, arg := range n.List.Slice() {
+ for i, arg := range n.List().Slice() {
s := lookupN("a", i)
- if !isBuiltinCall && arg.Op == ir.OCONVNOP && arg.Type.IsUintptr() && arg.Left.Type.IsUnsafePtr() {
+ if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.Left().Type().IsUnsafePtr() {
origArgs[i] = arg
- arg = arg.Left
- n.List.SetIndex(i, arg)
+ arg = arg.Left()
+ n.List().SetIndex(i, arg)
}
- t.List.Append(symfield(s, arg.Type))
+ t.PtrList().Append(symfield(s, arg.Type()))
}
wrapCall_prgen++
sym := lookupN("wrap·", wrapCall_prgen)
fn := dclfunc(sym, t)
- args := paramNnames(t.Type)
+ args := paramNnames(t.Type())
for i, origArg := range origArgs {
if origArg == nil {
continue
}
- arg := ir.Nod(origArg.Op, args[i], nil)
- arg.Type = origArg.Type
+ arg := ir.Nod(origArg.Op(), args[i], nil)
+ arg.SetType(origArg.Type())
args[i] = arg
}
- call := ir.Nod(n.Op, nil, nil)
+ call := ir.Nod(n.Op(), nil, nil)
if !isBuiltinCall {
- call.Op = ir.OCALL
- call.Left = n.Left
+ call.SetOp(ir.OCALL)
+ call.SetLeft(n.Left())
call.SetIsDDD(n.IsDDD())
}
- call.List.Set(args)
- fn.Nbody.Set1(call)
+ call.PtrList().Set(args)
+ fn.PtrBody().Set1(call)
funcbody()
fn = typecheck(fn, ctxStmt)
- typecheckslice(fn.Nbody.Slice(), ctxStmt)
+ typecheckslice(fn.Body().Slice(), ctxStmt)
xtop = append(xtop, fn)
call = ir.Nod(ir.OCALL, nil, nil)
- call.Left = fn.Func.Nname
- call.List.Set(n.List.Slice())
+ call.SetLeft(fn.Func().Nname)
+ call.PtrList().Set(n.List().Slice())
call = typecheck(call, ctxStmt)
call = walkexpr(call, init)
return call
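// Illustration of the wrapping above (wrap·N names come from the counter),
// with f, T, p hypothetical:
//
//	go f(x, uintptr(unsafe.Pointer(p)))
//
// becomes, schematically,
//
//	func wrap·1(a0 T, a1 unsafe.Pointer) { f(a0, uintptr(a1)) }
//	go wrap·1(x, unsafe.Pointer(p))
//
// Moving the uintptr conversion into the wrapper (the origArgs bookkeeping)
// keeps the value a visible pointer at the go/defer site.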
for _, t := range types_ {
dowidth(t)
}
- n.Type = types.SubstAny(n.Type, &types_)
+ n.SetType(types.SubstAny(n.Type(), &types_))
if len(types_) > 0 {
base.Fatalf("substArgTypes: too many argument types")
}
// isRuneCount reports whether n is of the form len([]rune(string)).
// These are optimized into a call to runtime.countrunes.
func isRuneCount(n *ir.Node) bool {
- return base.Flag.N == 0 && !instrumenting && n.Op == ir.OLEN && n.Left.Op == ir.OSTR2RUNES
+ return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.Left().Op() == ir.OSTR2RUNES
}
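// The pattern isRuneCount recognizes, and the optimization it enables
// (countrunes being the runtime helper mentioned above):
//
//	len([]rune(s))  =>  runtime.countrunes(s)
//
// which counts the UTF-8 runes in place instead of materializing a []rune.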
func walkCheckPtrAlignment(n *ir.Node, init *ir.Nodes, count *ir.Node) *ir.Node {
- if !n.Type.IsPtr() {
- base.Fatalf("expected pointer type: %v", n.Type)
+ if !n.Type().IsPtr() {
+ base.Fatalf("expected pointer type: %v", n.Type())
}
- elem := n.Type.Elem()
+ elem := n.Type().Elem()
if count != nil {
if !elem.IsArray() {
base.Fatalf("expected array type: %v", elem)
count = nodintconst(1)
}
- n.Left = cheapexpr(n.Left, init)
- init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left, types.Types[types.TUNSAFEPTR]), typename(elem), conv(count, types.Types[types.TUINTPTR])))
+ n.SetLeft(cheapexpr(n.Left(), init))
+ init.Append(mkcall("checkptrAlignment", nil, init, convnop(n.Left(), types.Types[types.TUNSAFEPTR]), typename(elem), conv(count, types.Types[types.TUINTPTR])))
return n
}
// TODO(mdempsky): Make stricter. We only need to exempt
// reflect.Value.Pointer and reflect.Value.UnsafeAddr.
- switch n.Left.Op {
+ switch n.Left().Op() {
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
return n
}
- if n.Left.Op == ir.ODOTPTR && isReflectHeaderDataField(n.Left) {
+ if n.Left().Op() == ir.ODOTPTR && isReflectHeaderDataField(n.Left()) {
return n
}
var originals []*ir.Node
var walk func(n *ir.Node)
walk = func(n *ir.Node) {
- switch n.Op {
+ switch n.Op() {
case ir.OADD:
- walk(n.Left)
- walk(n.Right)
+ walk(n.Left())
+ walk(n.Right())
case ir.OSUB, ir.OANDNOT:
- walk(n.Left)
+ walk(n.Left())
case ir.OCONVNOP:
- if n.Left.Type.IsUnsafePtr() {
- n.Left = cheapexpr(n.Left, init)
- originals = append(originals, convnop(n.Left, types.Types[types.TUNSAFEPTR]))
+ if n.Left().Type().IsUnsafePtr() {
+ n.SetLeft(cheapexpr(n.Left(), init))
+ originals = append(originals, convnop(n.Left(), types.Types[types.TUNSAFEPTR]))
}
}
}
- walk(n.Left)
+ walk(n.Left())
n = cheapexpr(n, init)
slice := mkdotargslice(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
- slice.Esc = EscNone
+ slice.SetEsc(EscNone)
init.Append(mkcall("checkptrArithmetic", nil, init, convnop(n, types.Types[types.TUNSAFEPTR]), slice))
// TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
// checkPtr reports whether pointer checking should be enabled for
// function fn at a given level. See debugHelpFooter for defined
// levels.
func checkPtr(fn *ir.Node, level int) bool {
- return base.Debug.Checkptr >= level && fn.Func.Pragma&ir.NoCheckPtr == 0
+ return base.Debug.Checkptr >= level && fn.Func().Pragma&ir.NoCheckPtr == 0
}
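// Sketch of what the instrumentation above checks under -d=checkptr
// (semantics as suggested by the helper names; u, p1, off hypothetical):
//
//	p := (*int64)(unsafe.Pointer(u))        // checkptrAlignment: u must be
//	                                        // aligned and sized for int64
//	q := unsafe.Pointer(uintptr(p1) + off)  // checkptrArithmetic: the result
//	                                        // must still point into one of the
//	                                        // allocations in `originals`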
isNode := false
if n, ok := x.Interface().(Node); ok {
isNode = true
- p.printf("%s %s {", n.Op.String(), p.addr(x))
+ p.printf("%s %s {", n.Op().String(), p.addr(x))
} else {
p.printf("%s {", typ)
}
if base.Debug.DumpPtrs != 0 {
fmt.Fprintf(s, " p(%p)", n)
}
- if !short && n.Name != nil && n.Name.Vargen != 0 {
- fmt.Fprintf(s, " g(%d)", n.Name.Vargen)
+ if !short && n.Name() != nil && n.Name().Vargen != 0 {
+ fmt.Fprintf(s, " g(%d)", n.Name().Vargen)
}
- if base.Debug.DumpPtrs != 0 && !short && n.Name != nil && n.Name.Defn != nil {
+ if base.Debug.DumpPtrs != 0 && !short && n.Name() != nil && n.Name().Defn != nil {
// Useful to see where Defn is set and what node it points to
- fmt.Fprintf(s, " defn(%p)", n.Name.Defn)
+ fmt.Fprintf(s, " defn(%p)", n.Name().Defn)
}
- if n.Pos.IsKnown() {
+ if n.Pos().IsKnown() {
pfx := ""
- switch n.Pos.IsStmt() {
+ switch n.Pos().IsStmt() {
case src.PosNotStmt:
pfx = "_" // "-" would be confusing
case src.PosIsStmt:
pfx = "+"
}
- fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos.Line())
+ fmt.Fprintf(s, " l(%s%d)", pfx, n.Pos().Line())
}
- if !short && n.Xoffset != types.BADWIDTH {
- fmt.Fprintf(s, " x(%d)", n.Xoffset)
+ if !short && n.Offset() != types.BADWIDTH {
+ fmt.Fprintf(s, " x(%d)", n.Offset())
}
if n.Class() != 0 {
fmt.Fprintf(s, " embedded")
}
- if n.Op == ONAME {
- if n.Name.Addrtaken() {
+ if n.Op() == ONAME {
+ if n.Name().Addrtaken() {
fmt.Fprint(s, " addrtaken")
}
- if n.Name.Assigned() {
+ if n.Name().Assigned() {
fmt.Fprint(s, " assigned")
}
- if n.Name.IsClosureVar() {
+ if n.Name().IsClosureVar() {
fmt.Fprint(s, " closurevar")
}
- if n.Name.Captured() {
+ if n.Name().Captured() {
fmt.Fprint(s, " captured")
}
- if n.Name.IsOutputParamHeapAddr() {
+ if n.Name().IsOutputParamHeapAddr() {
fmt.Fprint(s, " outputparamheapaddr")
}
}
fmt.Fprint(s, " hascall")
}
- if !short && n.Name != nil && n.Name.Used() {
+ if !short && n.Name() != nil && n.Name().Used() {
fmt.Fprint(s, " used")
}
}
// block starting with the init statements.
// if we can just say "for" n->ninit; ... then do so
- simpleinit := n.Ninit.Len() == 1 && n.Ninit.First().Ninit.Len() == 0 && StmtWithInit(n.Op)
+ simpleinit := n.Init().Len() == 1 && n.Init().First().Init().Len() == 0 && StmtWithInit(n.Op())
// otherwise, print the inits as separate statements
- complexinit := n.Ninit.Len() != 0 && !simpleinit && (mode != FErr)
+ complexinit := n.Init().Len() != 0 && !simpleinit && (mode != FErr)
// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
- extrablock := complexinit && StmtWithInit(n.Op)
+ extrablock := complexinit && StmtWithInit(n.Op())
if extrablock {
fmt.Fprint(s, "{")
}
if complexinit {
- mode.Fprintf(s, " %v; ", n.Ninit)
+ mode.Fprintf(s, " %v; ", n.Init())
}
- switch n.Op {
+ switch n.Op() {
case ODCL:
- mode.Fprintf(s, "var %v %v", n.Left.Sym, n.Left.Type)
+ mode.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type())
case ODCLFIELD:
- if n.Sym != nil {
- mode.Fprintf(s, "%v %v", n.Sym, n.Left)
+ if n.Sym() != nil {
+ mode.Fprintf(s, "%v %v", n.Sym(), n.Left())
} else {
- mode.Fprintf(s, "%v", n.Left)
+ mode.Fprintf(s, "%v", n.Left())
}
// Don't export "v = <N>" initializing statements, hope they're always
// the "v = <N>" again.
case OAS:
if n.Colas() && !complexinit {
- mode.Fprintf(s, "%v := %v", n.Left, n.Right)
+ mode.Fprintf(s, "%v := %v", n.Left(), n.Right())
} else {
- mode.Fprintf(s, "%v = %v", n.Left, n.Right)
+ mode.Fprintf(s, "%v = %v", n.Left(), n.Right())
}
case OASOP:
if n.Implicit() {
if n.SubOp() == OADD {
- mode.Fprintf(s, "%v++", n.Left)
+ mode.Fprintf(s, "%v++", n.Left())
} else {
- mode.Fprintf(s, "%v--", n.Left)
+ mode.Fprintf(s, "%v--", n.Left())
}
break
}
- mode.Fprintf(s, "%v %#v= %v", n.Left, n.SubOp(), n.Right)
+ mode.Fprintf(s, "%v %#v= %v", n.Left(), n.SubOp(), n.Right())
case OAS2:
if n.Colas() && !complexinit {
- mode.Fprintf(s, "%.v := %.v", n.List, n.Rlist)
+ mode.Fprintf(s, "%.v := %.v", n.List(), n.Rlist())
break
}
fallthrough
case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
- mode.Fprintf(s, "%.v = %v", n.List, n.Right)
+ mode.Fprintf(s, "%.v = %v", n.List(), n.Right())
case ORETURN:
- mode.Fprintf(s, "return %.v", n.List)
+ mode.Fprintf(s, "return %.v", n.List())
case ORETJMP:
- mode.Fprintf(s, "retjmp %v", n.Sym)
+ mode.Fprintf(s, "retjmp %v", n.Sym())
case OINLMARK:
- mode.Fprintf(s, "inlmark %d", n.Xoffset)
+ mode.Fprintf(s, "inlmark %d", n.Offset())
case OGO:
- mode.Fprintf(s, "go %v", n.Left)
+ mode.Fprintf(s, "go %v", n.Left())
case ODEFER:
- mode.Fprintf(s, "defer %v", n.Left)
+ mode.Fprintf(s, "defer %v", n.Left())
case OIF:
if simpleinit {
- mode.Fprintf(s, "if %v; %v { %v }", n.Ninit.First(), n.Left, n.Nbody)
+ mode.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body())
} else {
- mode.Fprintf(s, "if %v { %v }", n.Left, n.Nbody)
+ mode.Fprintf(s, "if %v { %v }", n.Left(), n.Body())
}
- if n.Rlist.Len() != 0 {
- mode.Fprintf(s, " else { %v }", n.Rlist)
+ if n.Rlist().Len() != 0 {
+ mode.Fprintf(s, " else { %v }", n.Rlist())
}
case OFOR, OFORUNTIL:
opname := "for"
- if n.Op == OFORUNTIL {
+ if n.Op() == OFORUNTIL {
opname = "foruntil"
}
if mode == FErr { // TODO maybe only if FmtShort, same below
fmt.Fprint(s, opname)
if simpleinit {
- mode.Fprintf(s, " %v;", n.Ninit.First())
- } else if n.Right != nil {
+ mode.Fprintf(s, " %v;", n.Init().First())
+ } else if n.Right() != nil {
fmt.Fprint(s, " ;")
}
- if n.Left != nil {
- mode.Fprintf(s, " %v", n.Left)
+ if n.Left() != nil {
+ mode.Fprintf(s, " %v", n.Left())
}
- if n.Right != nil {
- mode.Fprintf(s, "; %v", n.Right)
+ if n.Right() != nil {
+ mode.Fprintf(s, "; %v", n.Right())
} else if simpleinit {
fmt.Fprint(s, ";")
}
- if n.Op == OFORUNTIL && n.List.Len() != 0 {
- mode.Fprintf(s, "; %v", n.List)
+ if n.Op() == OFORUNTIL && n.List().Len() != 0 {
+ mode.Fprintf(s, "; %v", n.List())
}
- mode.Fprintf(s, " { %v }", n.Nbody)
+ mode.Fprintf(s, " { %v }", n.Body())
case ORANGE:
if mode == FErr {
break
}
- if n.List.Len() == 0 {
- mode.Fprintf(s, "for range %v { %v }", n.Right, n.Nbody)
+ if n.List().Len() == 0 {
+ mode.Fprintf(s, "for range %v { %v }", n.Right(), n.Body())
break
}
- mode.Fprintf(s, "for %.v = range %v { %v }", n.List, n.Right, n.Nbody)
+ mode.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body())
case OSELECT, OSWITCH:
if mode == FErr {
- mode.Fprintf(s, "%v statement", n.Op)
+ mode.Fprintf(s, "%v statement", n.Op())
break
}
- mode.Fprintf(s, "%#v", n.Op)
+ mode.Fprintf(s, "%#v", n.Op())
if simpleinit {
- mode.Fprintf(s, " %v;", n.Ninit.First())
+ mode.Fprintf(s, " %v;", n.Init().First())
}
- if n.Left != nil {
- mode.Fprintf(s, " %v ", n.Left)
+ if n.Left() != nil {
+ mode.Fprintf(s, " %v ", n.Left())
}
- mode.Fprintf(s, " { %v }", n.List)
+ mode.Fprintf(s, " { %v }", n.List())
case OCASE:
- if n.List.Len() != 0 {
- mode.Fprintf(s, "case %.v", n.List)
+ if n.List().Len() != 0 {
+ mode.Fprintf(s, "case %.v", n.List())
} else {
fmt.Fprint(s, "default")
}
- mode.Fprintf(s, ": %v", n.Nbody)
+ mode.Fprintf(s, ": %v", n.Body())
case OBREAK, OCONTINUE, OGOTO, OFALL:
- if n.Sym != nil {
- mode.Fprintf(s, "%#v %v", n.Op, n.Sym)
+ if n.Sym() != nil {
+ mode.Fprintf(s, "%#v %v", n.Op(), n.Sym())
} else {
- mode.Fprintf(s, "%#v", n.Op)
+ mode.Fprintf(s, "%#v", n.Op())
}
case OEMPTY:
break
case OLABEL:
- mode.Fprintf(s, "%v: ", n.Sym)
+ mode.Fprintf(s, "%v: ", n.Sym())
}
if extrablock {
}
func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) {
- for n != nil && n.Implicit() && (n.Op == ODEREF || n.Op == OADDR) {
- n = n.Left
+ for n != nil && n.Implicit() && (n.Op() == ODEREF || n.Op() == OADDR) {
+ n = n.Left()
}
if n == nil {
return
}
- nprec := OpPrec[n.Op]
- if n.Op == OTYPE && n.Sym != nil {
+ nprec := OpPrec[n.Op()]
+ if n.Op() == OTYPE && n.Sym() != nil {
nprec = 8
}
return
}
- switch n.Op {
+ switch n.Op() {
case OPAREN:
- mode.Fprintf(s, "(%v)", n.Left)
+ mode.Fprintf(s, "(%v)", n.Left())
case ONIL:
fmt.Fprint(s, "nil")
case OLITERAL: // this is a bit of a mess
if mode == FErr {
- if n.Orig != nil && n.Orig != n {
- exprFmt(n.Orig, s, prec, mode)
+ if n.Orig() != nil && n.Orig() != n {
+ exprFmt(n.Orig(), s, prec, mode)
return
}
- if n.Sym != nil {
- fmt.Fprint(s, smodeString(n.Sym, mode))
+ if n.Sym() != nil {
+ fmt.Fprint(s, smodeString(n.Sym(), mode))
return
}
}
needUnparen := false
- if n.Type != nil && !n.Type.IsUntyped() {
+ if n.Type() != nil && !n.Type().IsUntyped() {
// Need parens when type begins with what might
// be misinterpreted as a unary operator: * or <-.
- if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == types.Crecv) {
- mode.Fprintf(s, "(%v)(", n.Type)
+ if n.Type().IsPtr() || (n.Type().IsChan() && n.Type().ChanDir() == types.Crecv) {
+ mode.Fprintf(s, "(%v)(", n.Type())
} else {
- mode.Fprintf(s, "%v(", n.Type)
+ mode.Fprintf(s, "%v(", n.Type())
}
needUnparen = true
}
- if n.Type == types.UntypedRune {
+ if n.Type() == types.UntypedRune {
switch x, ok := constant.Int64Val(n.Val()); {
case !ok:
fallthrough
case ONAME:
// Special case: name used as local variable in export.
// _ becomes ~b%d internally; print as _ for export
- if mode == FErr && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
+ if mode == FErr && n.Sym() != nil && n.Sym().Name[0] == '~' && n.Sym().Name[1] == 'b' {
fmt.Fprint(s, "_")
return
}
fallthrough
case OPACK, ONONAME, OMETHEXPR:
- fmt.Fprint(s, smodeString(n.Sym, mode))
+ fmt.Fprint(s, smodeString(n.Sym(), mode))
case OTYPE:
- if n.Type == nil && n.Sym != nil {
- fmt.Fprint(s, smodeString(n.Sym, mode))
+ if n.Type() == nil && n.Sym() != nil {
+ fmt.Fprint(s, smodeString(n.Sym(), mode))
return
}
- mode.Fprintf(s, "%v", n.Type)
+ mode.Fprintf(s, "%v", n.Type())
case OTARRAY:
- if n.Left != nil {
- mode.Fprintf(s, "[%v]%v", n.Left, n.Right)
+ if n.Left() != nil {
+ mode.Fprintf(s, "[%v]%v", n.Left(), n.Right())
return
}
- mode.Fprintf(s, "[]%v", n.Right) // happens before typecheck
+ mode.Fprintf(s, "[]%v", n.Right()) // happens before typecheck
case OTMAP:
- mode.Fprintf(s, "map[%v]%v", n.Left, n.Right)
+ mode.Fprintf(s, "map[%v]%v", n.Left(), n.Right())
case OTCHAN:
switch n.TChanDir() {
case types.Crecv:
- mode.Fprintf(s, "<-chan %v", n.Left)
+ mode.Fprintf(s, "<-chan %v", n.Left())
case types.Csend:
- mode.Fprintf(s, "chan<- %v", n.Left)
+ mode.Fprintf(s, "chan<- %v", n.Left())
default:
- if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.TChanDir() == types.Crecv {
- mode.Fprintf(s, "chan (%v)", n.Left)
+ if n.Left() != nil && n.Left().Op() == OTCHAN && n.Left().Sym() == nil && n.Left().TChanDir() == types.Crecv {
+ mode.Fprintf(s, "chan (%v)", n.Left())
} else {
- mode.Fprintf(s, "chan %v", n.Left)
+ mode.Fprintf(s, "chan %v", n.Left())
}
}
fmt.Fprint(s, "func literal")
return
}
- if n.Nbody.Len() != 0 {
- mode.Fprintf(s, "%v { %v }", n.Type, n.Nbody)
+ if n.Body().Len() != 0 {
+ mode.Fprintf(s, "%v { %v }", n.Type(), n.Body())
return
}
- mode.Fprintf(s, "%v { %v }", n.Type, n.Func.Decl.Nbody)
+ mode.Fprintf(s, "%v { %v }", n.Type(), n.Func().Decl.Body())
case OCOMPLIT:
if mode == FErr {
mode.Fprintf(s, "... argument")
return
}
- if n.Right != nil {
- mode.Fprintf(s, "%v{%s}", n.Right, ellipsisIf(n.List.Len() != 0))
+ if n.Right() != nil {
+ mode.Fprintf(s, "%v{%s}", n.Right(), ellipsisIf(n.List().Len() != 0))
return
}
fmt.Fprint(s, "composite literal")
return
}
- mode.Fprintf(s, "(%v{ %.v })", n.Right, n.List)
+ mode.Fprintf(s, "(%v{ %.v })", n.Right(), n.List())
case OPTRLIT:
- mode.Fprintf(s, "&%v", n.Left)
+ mode.Fprintf(s, "&%v", n.Left())
case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
if mode == FErr {
- mode.Fprintf(s, "%v{%s}", n.Type, ellipsisIf(n.List.Len() != 0))
+ mode.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List().Len() != 0))
return
}
- mode.Fprintf(s, "(%v{ %.v })", n.Type, n.List)
+ mode.Fprintf(s, "(%v{ %.v })", n.Type(), n.List())
case OKEY:
- if n.Left != nil && n.Right != nil {
- mode.Fprintf(s, "%v:%v", n.Left, n.Right)
+ if n.Left() != nil && n.Right() != nil {
+ mode.Fprintf(s, "%v:%v", n.Left(), n.Right())
return
}
- if n.Left == nil && n.Right != nil {
- mode.Fprintf(s, ":%v", n.Right)
+ if n.Left() == nil && n.Right() != nil {
+ mode.Fprintf(s, ":%v", n.Right())
return
}
- if n.Left != nil && n.Right == nil {
- mode.Fprintf(s, "%v:", n.Left)
+ if n.Left() != nil && n.Right() == nil {
+ mode.Fprintf(s, "%v:", n.Left())
return
}
fmt.Fprint(s, ":")
case OSTRUCTKEY:
- mode.Fprintf(s, "%v:%v", n.Sym, n.Left)
+ mode.Fprintf(s, "%v:%v", n.Sym(), n.Left())
case OCALLPART:
- exprFmt(n.Left, s, nprec, mode)
- if n.Right == nil || n.Right.Sym == nil {
+ exprFmt(n.Left(), s, nprec, mode)
+ if n.Right() == nil || n.Right().Sym() == nil {
fmt.Fprint(s, ".<nil>")
return
}
- mode.Fprintf(s, ".%0S", n.Right.Sym)
+ mode.Fprintf(s, ".%0S", n.Right().Sym())
case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
- exprFmt(n.Left, s, nprec, mode)
- if n.Sym == nil {
+ exprFmt(n.Left(), s, nprec, mode)
+ if n.Sym() == nil {
fmt.Fprint(s, ".<nil>")
return
}
- mode.Fprintf(s, ".%0S", n.Sym)
+ mode.Fprintf(s, ".%0S", n.Sym())
case ODOTTYPE, ODOTTYPE2:
- exprFmt(n.Left, s, nprec, mode)
- if n.Right != nil {
- mode.Fprintf(s, ".(%v)", n.Right)
+ exprFmt(n.Left(), s, nprec, mode)
+ if n.Right() != nil {
+ mode.Fprintf(s, ".(%v)", n.Right())
return
}
- mode.Fprintf(s, ".(%v)", n.Type)
+ mode.Fprintf(s, ".(%v)", n.Type())
case OINDEX, OINDEXMAP:
- exprFmt(n.Left, s, nprec, mode)
- mode.Fprintf(s, "[%v]", n.Right)
+ exprFmt(n.Left(), s, nprec, mode)
+ mode.Fprintf(s, "[%v]", n.Right())
case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
- exprFmt(n.Left, s, nprec, mode)
+ exprFmt(n.Left(), s, nprec, mode)
fmt.Fprint(s, "[")
low, high, max := n.SliceBounds()
if low != nil {
if high != nil {
fmt.Fprint(s, modeString(high, mode))
}
- if n.Op.IsSlice3() {
+ if n.Op().IsSlice3() {
fmt.Fprint(s, ":")
if max != nil {
fmt.Fprint(s, modeString(max, mode))
fmt.Fprint(s, "]")
case OSLICEHEADER:
- if n.List.Len() != 2 {
- base.Fatalf("bad OSLICEHEADER list length %d", n.List.Len())
+ if n.List().Len() != 2 {
+ base.Fatalf("bad OSLICEHEADER list length %d", n.List().Len())
}
- mode.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left, n.List.First(), n.List.Second())
+ mode.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left(), n.List().First(), n.List().Second())
case OCOMPLEX, OCOPY:
- if n.Left != nil {
- mode.Fprintf(s, "%#v(%v, %v)", n.Op, n.Left, n.Right)
+ if n.Left() != nil {
+ mode.Fprintf(s, "%#v(%v, %v)", n.Op(), n.Left(), n.Right())
} else {
- mode.Fprintf(s, "%#v(%.v)", n.Op, n.List)
+ mode.Fprintf(s, "%#v(%.v)", n.Op(), n.List())
}
case OCONV,
OSTR2BYTES,
OSTR2RUNES,
ORUNESTR:
- if n.Type == nil || n.Type.Sym == nil {
- mode.Fprintf(s, "(%v)", n.Type)
+ if n.Type() == nil || n.Type().Sym == nil {
+ mode.Fprintf(s, "(%v)", n.Type())
} else {
- mode.Fprintf(s, "%v", n.Type)
+ mode.Fprintf(s, "%v", n.Type())
}
- if n.Left != nil {
- mode.Fprintf(s, "(%v)", n.Left)
+ if n.Left() != nil {
+ mode.Fprintf(s, "(%v)", n.Left())
} else {
- mode.Fprintf(s, "(%.v)", n.List)
+ mode.Fprintf(s, "(%.v)", n.List())
}
case OREAL,
OSIZEOF,
OPRINT,
OPRINTN:
- if n.Left != nil {
- mode.Fprintf(s, "%#v(%v)", n.Op, n.Left)
+ if n.Left() != nil {
+ mode.Fprintf(s, "%#v(%v)", n.Op(), n.Left())
return
}
if n.IsDDD() {
- mode.Fprintf(s, "%#v(%.v...)", n.Op, n.List)
+ mode.Fprintf(s, "%#v(%.v...)", n.Op(), n.List())
return
}
- mode.Fprintf(s, "%#v(%.v)", n.Op, n.List)
+ mode.Fprintf(s, "%#v(%.v)", n.Op(), n.List())
case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
- exprFmt(n.Left, s, nprec, mode)
+ exprFmt(n.Left(), s, nprec, mode)
if n.IsDDD() {
- mode.Fprintf(s, "(%.v...)", n.List)
+ mode.Fprintf(s, "(%.v...)", n.List())
return
}
- mode.Fprintf(s, "(%.v)", n.List)
+ mode.Fprintf(s, "(%.v)", n.List())
case OMAKEMAP, OMAKECHAN, OMAKESLICE:
- if n.List.Len() != 0 { // pre-typecheck
- mode.Fprintf(s, "make(%v, %.v)", n.Type, n.List)
+ if n.List().Len() != 0 { // pre-typecheck
+ mode.Fprintf(s, "make(%v, %.v)", n.Type(), n.List())
return
}
- if n.Right != nil {
- mode.Fprintf(s, "make(%v, %v, %v)", n.Type, n.Left, n.Right)
+ if n.Right() != nil {
+ mode.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Left(), n.Right())
return
}
- if n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()) {
- mode.Fprintf(s, "make(%v, %v)", n.Type, n.Left)
+ if n.Left() != nil && (n.Op() == OMAKESLICE || !n.Left().Type().IsUntyped()) {
+ mode.Fprintf(s, "make(%v, %v)", n.Type(), n.Left())
return
}
- mode.Fprintf(s, "make(%v)", n.Type)
+ mode.Fprintf(s, "make(%v)", n.Type())
case OMAKESLICECOPY:
- mode.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type, n.Left, n.Right)
+ mode.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Left(), n.Right())
case OPLUS, ONEG, OADDR, OBITNOT, ODEREF, ONOT, ORECV:
// Unary
- mode.Fprintf(s, "%#v", n.Op)
- if n.Left != nil && n.Left.Op == n.Op {
+ mode.Fprintf(s, "%#v", n.Op())
+ if n.Left() != nil && n.Left().Op() == n.Op() {
fmt.Fprint(s, " ")
}
- exprFmt(n.Left, s, nprec+1, mode)
+ exprFmt(n.Left(), s, nprec+1, mode)
// Binary
case OADD,
OSEND,
OSUB,
OXOR:
- exprFmt(n.Left, s, nprec, mode)
- mode.Fprintf(s, " %#v ", n.Op)
- exprFmt(n.Right, s, nprec+1, mode)
+ exprFmt(n.Left(), s, nprec, mode)
+ mode.Fprintf(s, " %#v ", n.Op())
+ exprFmt(n.Right(), s, nprec+1, mode)
case OADDSTR:
- for i, n1 := range n.List.Slice() {
+ for i, n1 := range n.List().Slice() {
if i != 0 {
fmt.Fprint(s, " + ")
}
case ODDD:
mode.Fprintf(s, "...")
default:
- mode.Fprintf(s, "<node %v>", n.Op)
+ mode.Fprintf(s, "<node %v>", n.Op())
}
}
func nodeFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) {
- t := n.Type
+ t := n.Type()
// We almost always want the original.
// TODO(gri) Why the special case for OLITERAL?
- if n.Op != OLITERAL && n.Orig != nil {
- n = n.Orig
+ if n.Op() != OLITERAL && n.Orig() != nil {
+ n = n.Orig()
}
if flag&FmtLong != 0 && t != nil {
if t.Etype == types.TNIL {
fmt.Fprint(s, "nil")
- } else if n.Op == ONAME && n.Name.AutoTemp() {
+ } else if n.Op() == ONAME && n.Name().AutoTemp() {
mode.Fprintf(s, "%v value", t)
} else {
mode.Fprintf(s, "%v (type %v)", n, t)
// TODO inlining produces expressions with ninits. we can't print these yet.
- if OpPrec[n.Op] < 0 {
+ if OpPrec[n.Op()] < 0 {
stmtFmt(n, s, mode)
return
}
return
}
- if n.Ninit.Len() != 0 {
- mode.Fprintf(s, "%v-init%v", n.Op, n.Ninit)
+ if n.Init().Len() != 0 {
+ mode.Fprintf(s, "%v-init%v", n.Op(), n.Init())
indent(s)
}
}
- switch n.Op {
+ switch n.Op() {
default:
- mode.Fprintf(s, "%v%j", n.Op, n)
+ mode.Fprintf(s, "%v%j", n.Op(), n)
case OLITERAL:
- mode.Fprintf(s, "%v-%v%j", n.Op, n.Val(), n)
+ mode.Fprintf(s, "%v-%v%j", n.Op(), n.Val(), n)
case ONAME, ONONAME, OMETHEXPR:
- if n.Sym != nil {
- mode.Fprintf(s, "%v-%v%j", n.Op, n.Sym, n)
+ if n.Sym() != nil {
+ mode.Fprintf(s, "%v-%v%j", n.Op(), n.Sym(), n)
} else {
- mode.Fprintf(s, "%v%j", n.Op, n)
+ mode.Fprintf(s, "%v%j", n.Op(), n)
}
- if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
+ if recur && n.Type() == nil && n.Name() != nil && n.Name().Param != nil && n.Name().Param.Ntype != nil {
indent(s)
- mode.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
+ mode.Fprintf(s, "%v-ntype%v", n.Op(), n.Name().Param.Ntype)
}
case OASOP:
- mode.Fprintf(s, "%v-%v%j", n.Op, n.SubOp(), n)
+ mode.Fprintf(s, "%v-%v%j", n.Op(), n.SubOp(), n)
case OTYPE:
- mode.Fprintf(s, "%v %v%j type=%v", n.Op, n.Sym, n, n.Type)
- if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
+ mode.Fprintf(s, "%v %v%j type=%v", n.Op(), n.Sym(), n, n.Type())
+ if recur && n.Type() == nil && n.Name() != nil && n.Name().Param != nil && n.Name().Param.Ntype != nil {
indent(s)
- mode.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
+ mode.Fprintf(s, "%v-ntype%v", n.Op(), n.Name().Param.Ntype)
}
}
- if n.Op == OCLOSURE && n.Func.Decl != nil && n.Func.Nname.Sym != nil {
- mode.Fprintf(s, " fnName %v", n.Func.Nname.Sym)
+ if n.Op() == OCLOSURE && n.Func().Decl != nil && n.Func().Nname.Sym() != nil {
+ mode.Fprintf(s, " fnName %v", n.Func().Nname.Sym())
}
- if n.Sym != nil && n.Op != ONAME {
- mode.Fprintf(s, " %v", n.Sym)
+ if n.Sym() != nil && n.Op() != ONAME {
+ mode.Fprintf(s, " %v", n.Sym())
}
- if n.Type != nil {
- mode.Fprintf(s, " %v", n.Type)
+ if n.Type() != nil {
+ mode.Fprintf(s, " %v", n.Type())
}
if recur {
- if n.Left != nil {
- mode.Fprintf(s, "%v", n.Left)
+ if n.Left() != nil {
+ mode.Fprintf(s, "%v", n.Left())
}
- if n.Right != nil {
- mode.Fprintf(s, "%v", n.Right)
+ if n.Right() != nil {
+ mode.Fprintf(s, "%v", n.Right())
}
- if n.Op == OCLOSURE && n.Func != nil && n.Func.Decl != nil && n.Func.Decl.Nbody.Len() != 0 {
+ if n.Op() == OCLOSURE && n.Func() != nil && n.Func().Decl != nil && n.Func().Decl.Body().Len() != 0 {
indent(s)
// The function associated with a closure
- mode.Fprintf(s, "%v-clofunc%v", n.Op, n.Func.Decl)
+ mode.Fprintf(s, "%v-clofunc%v", n.Op(), n.Func().Decl)
}
- if n.Op == ODCLFUNC && n.Func != nil && n.Func.Dcl != nil && len(n.Func.Dcl) != 0 {
+ if n.Op() == ODCLFUNC && n.Func() != nil && n.Func().Dcl != nil && len(n.Func().Dcl) != 0 {
indent(s)
// The dcls for a func or closure
- mode.Fprintf(s, "%v-dcl%v", n.Op, AsNodes(n.Func.Dcl))
+ mode.Fprintf(s, "%v-dcl%v", n.Op(), AsNodes(n.Func().Dcl))
}
- if n.List.Len() != 0 {
+ if n.List().Len() != 0 {
indent(s)
- mode.Fprintf(s, "%v-list%v", n.Op, n.List)
+ mode.Fprintf(s, "%v-list%v", n.Op(), n.List())
}
- if n.Rlist.Len() != 0 {
+ if n.Rlist().Len() != 0 {
indent(s)
- mode.Fprintf(s, "%v-rlist%v", n.Op, n.Rlist)
+ mode.Fprintf(s, "%v-rlist%v", n.Op(), n.Rlist())
}
- if n.Nbody.Len() != 0 {
+ if n.Body().Len() != 0 {
indent(s)
- mode.Fprintf(s, "%v-body%v", n.Op, n.Nbody)
+ mode.Fprintf(s, "%v-body%v", n.Op(), n.Body())
}
}
}
// Line returns n's position as a string. If n has been inlined,
// it uses the outermost position where n has been inlined.
func Line(n *Node) string {
- return base.FmtPos(n.Pos)
+ return base.FmtPos(n.Pos())
}
type Node struct {
// Tree structure.
// Generic recursive walks should follow these fields.
- Left *Node
- Right *Node
- Ninit Nodes
- Nbody Nodes
- List Nodes
- Rlist Nodes
+ left *Node
+ right *Node
+ init Nodes
+ body Nodes
+ list Nodes
+ rlist Nodes
// most nodes
- Type *types.Type
- Orig *Node // original form, for printing, and tracking copies of ONAMEs
+ typ *types.Type
+ orig *Node // original form, for printing, and tracking copies of ONAMEs
// func
- Func *Func
+ fn *Func
// ONAME, OTYPE, OPACK, OLABEL, some OLITERAL
- Name *Name
+ name *Name
- Sym *types.Sym // various
- E interface{} // Opt or Val, see methods below
+ sym *types.Sym // various
+ e interface{} // Opt or Val, see methods below
// Various. Usually an offset into a struct. For example:
// - ONAME nodes that refer to local variables use it to identify their stack frame position.
// - OINLMARK stores an index into the inlTree data structure.
// - OCLOSURE uses it to store ambient iota value, if any.
// Possibly still more uses. If you find any, document them.
- Xoffset int64
+ offset int64
- Pos src.XPos
+ pos src.XPos
flags bitset32
- Esc uint16 // EscXXX
+ esc uint16 // EscXXX
- Op Op
+ op Op
aux uint8
}
-func (n *Node) GetLeft() *Node { return n.Left }
-func (n *Node) SetLeft(x *Node) { n.Left = x }
-func (n *Node) GetRight() *Node { return n.Right }
-func (n *Node) SetRight(x *Node) { n.Right = x }
-func (n *Node) GetOrig() *Node { return n.Orig }
-func (n *Node) SetOrig(x *Node) { n.Orig = x }
-func (n *Node) GetType() *types.Type { return n.Type }
-func (n *Node) SetType(x *types.Type) { n.Type = x }
-func (n *Node) GetFunc() *Func { return n.Func }
-func (n *Node) SetFunc(x *Func) { n.Func = x }
-func (n *Node) GetName() *Name { return n.Name }
-func (n *Node) SetName(x *Name) { n.Name = x }
-func (n *Node) GetSym() *types.Sym { return n.Sym }
-func (n *Node) SetSym(x *types.Sym) { n.Sym = x }
-func (n *Node) GetPos() src.XPos { return n.Pos }
-func (n *Node) SetPos(x src.XPos) { n.Pos = x }
-func (n *Node) GetXoffset() int64 { return n.Xoffset }
-func (n *Node) SetXoffset(x int64) { n.Xoffset = x }
-func (n *Node) GetEsc() uint16 { return n.Esc }
-func (n *Node) SetEsc(x uint16) { n.Esc = x }
-func (n *Node) GetOp() Op { return n.Op }
-func (n *Node) SetOp(x Op) { n.Op = x }
-func (n *Node) GetNinit() Nodes { return n.Ninit }
-func (n *Node) SetNinit(x Nodes) { n.Ninit = x }
-func (n *Node) PtrNinit() *Nodes { return &n.Ninit }
-func (n *Node) GetNbody() Nodes { return n.Nbody }
-func (n *Node) SetNbody(x Nodes) { n.Nbody = x }
-func (n *Node) PtrNbody() *Nodes { return &n.Nbody }
-func (n *Node) GetList() Nodes { return n.List }
-func (n *Node) SetList(x Nodes) { n.List = x }
-func (n *Node) PtrList() *Nodes { return &n.List }
-func (n *Node) GetRlist() Nodes { return n.Rlist }
-func (n *Node) SetRlist(x Nodes) { n.Rlist = x }
-func (n *Node) PtrRlist() *Nodes { return &n.Rlist }
+func (n *Node) Left() *Node { return n.left }
+func (n *Node) SetLeft(x *Node) { n.left = x }
+func (n *Node) Right() *Node { return n.right }
+func (n *Node) SetRight(x *Node) { n.right = x }
+func (n *Node) Orig() *Node { return n.orig }
+func (n *Node) SetOrig(x *Node) { n.orig = x }
+func (n *Node) Type() *types.Type { return n.typ }
+func (n *Node) SetType(x *types.Type) { n.typ = x }
+func (n *Node) Func() *Func { return n.fn }
+func (n *Node) SetFunc(x *Func) { n.fn = x }
+func (n *Node) Name() *Name { return n.name }
+func (n *Node) SetName(x *Name) { n.name = x }
+func (n *Node) Sym() *types.Sym { return n.sym }
+func (n *Node) SetSym(x *types.Sym) { n.sym = x }
+func (n *Node) Pos() src.XPos { return n.pos }
+func (n *Node) SetPos(x src.XPos) { n.pos = x }
+func (n *Node) Offset() int64 { return n.offset }
+func (n *Node) SetOffset(x int64) { n.offset = x }
+func (n *Node) Esc() uint16 { return n.esc }
+func (n *Node) SetEsc(x uint16) { n.esc = x }
+func (n *Node) Op() Op { return n.op }
+func (n *Node) SetOp(x Op) { n.op = x }
+func (n *Node) Init() Nodes { return n.init }
+func (n *Node) SetInit(x Nodes) { n.init = x }
+func (n *Node) PtrInit() *Nodes { return &n.init }
+func (n *Node) Body() Nodes { return n.body }
+func (n *Node) SetBody(x Nodes) { n.body = x }
+func (n *Node) PtrBody() *Nodes { return &n.body }
+func (n *Node) List() Nodes { return n.list }
+func (n *Node) SetList(x Nodes) { n.list = x }
+func (n *Node) PtrList() *Nodes { return &n.list }
+func (n *Node) Rlist() Nodes { return n.rlist }
+func (n *Node) SetRlist(x Nodes) { n.rlist = x }
+func (n *Node) PtrRlist() *Nodes { return &n.rlist }
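// Illustration (editorial sketch, not part of the patch): after this
// change, a call site that read fields directly is rewritten to use the
// accessors, and in-place list mutation goes through the Ptr* forms
// (elem here is a hypothetical *Node):
//
//	// before
//	if n.Left != nil && n.Op == OADD {
//		n.List.Append(elem)
//	}
//	// after
//	if n.Left() != nil && n.Op() == OADD {
//		n.PtrList().Append(elem)
//	}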
func (n *Node) ResetAux() {
n.aux = 0
}
func (n *Node) SubOp() Op {
- switch n.Op {
+ switch n.Op() {
case OASOP, ONAME:
default:
- base.Fatalf("unexpected op: %v", n.Op)
+ base.Fatalf("unexpected op: %v", n.Op())
}
return Op(n.aux)
}
func (n *Node) SetSubOp(op Op) {
- switch n.Op {
+ switch n.Op() {
case OASOP, ONAME:
default:
- base.Fatalf("unexpected op: %v", n.Op)
+ base.Fatalf("unexpected op: %v", n.Op())
}
n.aux = uint8(op)
}
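// Usage sketch (assumed, not from the patch): an OASOP node packs the
// underlying operator into the single aux byte, which the printer above
// reads back via SubOp to render "x += y" (or "x++" when implicit).
// x and y are hypothetical operand nodes:
//
//	n := Nod(OASOP, x, y)
//	n.SetSubOp(OADD)
//	_ = n.SubOp() // OADD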
func (n *Node) IndexMapLValue() bool {
- if n.Op != OINDEXMAP {
- base.Fatalf("unexpected op: %v", n.Op)
+ if n.Op() != OINDEXMAP {
+ base.Fatalf("unexpected op: %v", n.Op())
}
return n.aux != 0
}
func (n *Node) SetIndexMapLValue(b bool) {
- if n.Op != OINDEXMAP {
- base.Fatalf("unexpected op: %v", n.Op)
+ if n.Op() != OINDEXMAP {
+ base.Fatalf("unexpected op: %v", n.Op())
}
if b {
n.aux = 1
}
func (n *Node) TChanDir() types.ChanDir {
- if n.Op != OTCHAN {
- base.Fatalf("unexpected op: %v", n.Op)
+ if n.Op() != OTCHAN {
+ base.Fatalf("unexpected op: %v", n.Op())
}
return types.ChanDir(n.aux)
}
func (n *Node) SetTChanDir(dir types.ChanDir) {
- if n.Op != OTCHAN {
- base.Fatalf("unexpected op: %v", n.Op)
+ if n.Op() != OTCHAN {
+ base.Fatalf("unexpected op: %v", n.Op())
}
n.aux = uint8(dir)
}
func IsSynthetic(n *Node) bool {
- name := n.Sym.Name
+ name := n.Sym().Name
return name[0] == '.' || name[0] == '~'
}
// IsAutoTmp indicates if n was created by the compiler as a temporary,
// based on the setting of the .AutoTemp flag in n's Name.
func IsAutoTmp(n *Node) bool {
- if n == nil || n.Op != ONAME {
+ if n == nil || n.Op() != ONAME {
return false
}
- return n.Name.AutoTemp()
+ return n.Name().AutoTemp()
}
const (
func (n *Node) SetTransient(b bool) { n.flags.set(nodeTransient, b) }
func (n *Node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) }
func (n *Node) SetLikely(b bool) { n.flags.set(nodeLikely, b) }
-func (n *Node) SetHasVal(b bool) { n.flags.set(nodeHasVal, b) }
-func (n *Node) SetHasOpt(b bool) { n.flags.set(nodeHasOpt, b) }
+func (n *Node) setHasVal(b bool) { n.flags.set(nodeHasVal, b) }
+func (n *Node) setHasOpt(b bool) { n.flags.set(nodeHasOpt, b) }
func (n *Node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) }
// MarkNonNil marks a pointer n as being guaranteed non-nil.
// During conversion to SSA, non-nil pointers won't have nil checks
// inserted before dereferencing. See state.exprPtr.
func (n *Node) MarkNonNil() {
- if !n.Type.IsPtr() && !n.Type.IsUnsafePtr() {
- base.Fatalf("MarkNonNil(%v), type %v", n, n.Type)
+ if !n.Type().IsPtr() && !n.Type().IsUnsafePtr() {
+ base.Fatalf("MarkNonNil(%v), type %v", n, n.Type())
}
n.flags.set(nodeNonNil, true)
}
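// Hedged example of the contract documented above (p is a hypothetical
// node whose Type().IsPtr() is true; a non-pointer would hit Fatalf):
//
//	p.MarkNonNil() // a later ODEREF of p compiles without a nil check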
// When n is a dereferencing operation, n does not need nil checks.
// When n is a makeslice+copy operation, n does not need length and cap checks.
func (n *Node) SetBounded(b bool) {
- switch n.Op {
+ switch n.Op() {
case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
// No bounds checks needed.
case ODOTPTR, ODEREF:
// MarkReadonly indicates that n is an ONAME with readonly contents.
func (n *Node) MarkReadonly() {
- if n.Op != ONAME {
- base.Fatalf("Node.MarkReadonly %v", n.Op)
+ if n.Op() != ONAME {
+ base.Fatalf("Node.MarkReadonly %v", n.Op())
}
- n.Name.SetReadonly(true)
+ n.Name().SetReadonly(true)
// Mark the linksym as readonly immediately
// so that the SSA backend can use this information.
// It will be overridden later during dumpglobls.
- n.Sym.Linksym().Type = objabi.SRODATA
+ n.Sym().Linksym().Type = objabi.SRODATA
}
// Val returns the constant.Value for the node.
if !n.HasVal() {
return constant.MakeUnknown()
}
- return *n.E.(*constant.Value)
+ return *n.e.(*constant.Value)
}
// SetVal sets the constant.Value for the node,
Dump("have Opt", n)
base.Fatalf("have Opt")
}
- if n.Op == OLITERAL {
- AssertValidTypeForConst(n.Type, v)
+ if n.Op() == OLITERAL {
+ AssertValidTypeForConst(n.Type(), v)
}
- n.SetHasVal(true)
- n.E = &v
+ n.setHasVal(true)
+ n.e = &v
}
// Opt returns the optimizer data for the node.
if !n.HasOpt() {
return nil
}
- return n.E
+ return n.e
}
// SetOpt sets the optimizer data for the node, which must not have been used with SetVal.
func (n *Node) SetOpt(x interface{}) {
if x == nil {
if n.HasOpt() {
- n.SetHasOpt(false)
- n.E = nil
+ n.setHasOpt(false)
+ n.e = nil
}
return
}
Dump("have Val", n)
base.Fatalf("have Val")
}
- n.SetHasOpt(true)
- n.E = x
+ n.setHasOpt(true)
+ n.e = x
}
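// Editorial note: the single e field backs both Val and Opt, with the
// nodeHasVal/nodeHasOpt bits recording which one is live, so mixing the
// two on one node is a fatal error. Sketch (someData is hypothetical):
//
//	n.SetVal(constant.MakeInt64(1))
//	n.SetOpt(someData) // Fatalf: "have Val"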
func (n *Node) Iota() int64 {
- return n.Xoffset
+ return n.Offset()
}
func (n *Node) SetIota(x int64) {
- n.Xoffset = x
+ n.SetOffset(x)
}
// MayBeShared reports whether n may occur in multiple places in the AST.
// Extra care must be taken when mutating such a node.
func MayBeShared(n *Node) bool {
- switch n.Op {
+ switch n.Op() {
case ONAME, OLITERAL, ONIL, OTYPE:
return true
}
// FuncName returns the name (without the package) of the function n.
func FuncName(n *Node) string {
- if n == nil || n.Func == nil || n.Func.Nname == nil {
+ if n == nil || n.Func() == nil || n.Func().Nname == nil {
return "<nil>"
}
- return n.Func.Nname.Sym.Name
+ return n.Func().Nname.Sym().Name
}
// PkgFuncName returns the name of the function referenced by n, with package prepended.
if n == nil {
return "<nil>"
}
- if n.Op == ONAME {
- s = n.Sym
+ if n.Op() == ONAME {
+ s = n.Sym()
} else {
- if n.Func == nil || n.Func.Nname == nil {
+ if n.Func() == nil || n.Func().Nname == nil {
return "<nil>"
}
- s = n.Func.Nname.Sym
+ s = n.Func().Nname.Sym()
}
pkg := s.Pkg
if n == nil || !f(n) {
return
}
- InspectList(n.Ninit, f)
- Inspect(n.Left, f)
- Inspect(n.Right, f)
- InspectList(n.List, f)
- InspectList(n.Nbody, f)
- InspectList(n.Rlist, f)
+ InspectList(n.Init(), f)
+ Inspect(n.Left(), f)
+ Inspect(n.Right(), f)
+ InspectList(n.List(), f)
+ InspectList(n.Body(), f)
+ InspectList(n.Rlist(), f)
}
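// Hypothetical usage of Inspect (not in the patch): count call nodes
// under fn. Returning false from the callback prunes the walk beneath
// the current node; the callback is never invoked with nil.
//
//	calls := 0
//	Inspect(fn, func(n *Node) bool {
//		if n.Op() == OCALLFUNC {
//			calls++
//		}
//		return true
//	})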
func InspectList(l Nodes, f func(*Node) bool) {
f Func
}
n = &x.n
- n.Func = &x.f
- n.Func.Decl = n
+ n.SetFunc(&x.f)
+ n.Func().Decl = n
case ONAME:
base.Fatalf("use newname instead")
case OLABEL, OPACK:
m Name
}
n = &x.n
- n.Name = &x.m
+ n.SetName(&x.m)
default:
n = new(Node)
}
- n.Op = op
- n.Left = nleft
- n.Right = nright
- n.Pos = pos
- n.Xoffset = types.BADWIDTH
- n.Orig = n
+ n.SetOp(op)
+ n.SetLeft(nleft)
+ n.SetRight(nright)
+ n.SetPos(pos)
+ n.SetOffset(types.BADWIDTH)
+ n.SetOrig(n)
return n
}
p Param
}
n := &x.n
- n.Name = &x.m
- n.Name.Param = &x.p
+ n.SetName(&x.m)
+ n.Name().Param = &x.p
- n.Op = ONAME
- n.Pos = pos
- n.Orig = n
+ n.SetOp(ONAME)
+ n.SetPos(pos)
+ n.SetOrig(n)
- n.Sym = s
+ n.SetSym(s)
return n
}
return nil
case 'b': // originally the blank identifier _
// TODO(mdempsky): Does s.Pkg matter here?
- return BlankNode.Sym
+ return BlankNode.Sym()
}
return s
}
// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max].
// n must be a slice expression. max is nil if n is a simple slice expression.
func (n *Node) SliceBounds() (low, high, max *Node) {
- if n.List.Len() == 0 {
+ if n.List().Len() == 0 {
return nil, nil, nil
}
- switch n.Op {
+ switch n.Op() {
case OSLICE, OSLICEARR, OSLICESTR:
- s := n.List.Slice()
+ s := n.List().Slice()
return s[0], s[1], nil
case OSLICE3, OSLICE3ARR:
- s := n.List.Slice()
+ s := n.List().Slice()
return s[0], s[1], s[2]
}
- base.Fatalf("SliceBounds op %v: %v", n.Op, n)
+ base.Fatalf("SliceBounds op %v: %v", n.Op(), n)
return nil, nil, nil
}
// SetSliceBounds sets n's slice bounds, where n is a slice expression.
// n must be a slice expression. If max is non-nil, n must be a full slice expression.
func (n *Node) SetSliceBounds(low, high, max *Node) {
- switch n.Op {
+ switch n.Op() {
case OSLICE, OSLICEARR, OSLICESTR:
if max != nil {
- base.Fatalf("SetSliceBounds %v given three bounds", n.Op)
+ base.Fatalf("SetSliceBounds %v given three bounds", n.Op())
}
- s := n.List.Slice()
+ s := n.List().Slice()
if s == nil {
if low == nil && high == nil {
return
}
- n.List.Set2(low, high)
+ n.PtrList().Set2(low, high)
return
}
s[0] = low
s[1] = high
return
case OSLICE3, OSLICE3ARR:
- s := n.List.Slice()
+ s := n.List().Slice()
if s == nil {
if low == nil && high == nil && max == nil {
return
}
- n.List.Set3(low, high, max)
+ n.PtrList().Set3(low, high, max)
return
}
s[0] = low
s[2] = max
return
}
- base.Fatalf("SetSliceBounds op %v: %v", n.Op, n)
+ base.Fatalf("SetSliceBounds op %v: %v", n.Op(), n)
}
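// Usage sketch (hypothetical): the two helpers are symmetric, so a
// rewrite pass need not care whether n is a 2- or 3-index slice:
//
//	low, high, max := n.SliceBounds()
//	// ...adjust low/high/max...
//	n.SetSliceBounds(low, high, max)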
// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
// SepCopy returns a separate shallow copy of n, with the copy's
// Orig pointing to itself.
func SepCopy(n *Node) *Node {
copy := *n
- copy.Orig = &copy
+ copy.orig = &copy
return &copy
}
// messages; see issues #26855, #27765).
func Copy(n *Node) *Node {
copy := *n
- if n.Orig == n {
- copy.Orig = &copy
+ if n.Orig() == n {
+ copy.orig = &copy
}
return &copy
}
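// Editorial sketch of the difference between the two copies above:
//
//	c := Copy(n)    // c.Orig() is n's Orig, or c itself if n was its own Orig
//	s := SepCopy(n) // s.Orig() is always s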
func IsNil(n *Node) bool {
// Check n.Orig because constant propagation may produce typed nil constants,
// which don't exist in the Go spec.
- return n.Orig.Op == ONIL
+ return n.Orig().Op() == ONIL
}
func IsBlank(n *Node) bool {
if n == nil {
return false
}
- return n.Sym.IsBlank()
+ return n.Sym().IsBlank()
}
// IsMethod reports whether n is a method.
// n must be a function or a method.
func IsMethod(n *Node) bool {
- return n.Type.Recv() != nil
+ return n.Type().Recv() != nil
}
)
func ConstType(n *Node) constant.Kind {
- if n == nil || n.Op != OLITERAL {
+ if n == nil || n.Op() != OLITERAL {
return constant.Unknown
}
return n.Val().Kind()
case constant.String:
return constant.StringVal(v)
case constant.Int:
- return Int64Val(n.Type, v)
+ return Int64Val(n.Type(), v)
case constant.Float:
return Float64Val(v)
case constant.Complex:
func NewLiteral(v constant.Value) *Node {
n := Nod(OLITERAL, nil, nil)
if k := v.Kind(); k != constant.Unknown {
- n.Type = idealType(k)
+ n.SetType(idealType(k))
n.SetVal(v)
}
return n
continue
}
if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
- if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Node).Type.HasPointers()) {
+ if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Node).Type().HasPointers()) {
// These ops don't really change memory.
continue
// Note: OpVarDef requires that the defined variable not have pointers.