The plan is to introduce a Node interface that replaces the old *Node pointer-to-struct.
The previous CL defined an interface INode modeling a *Node.
This CL:
- Changes all references outside internal/ir to use INode,
along with many references inside internal/ir.
- Renames Node to node.
- Renames INode to Node.
So now ir.Node is an interface implemented by *ir.node, which is otherwise
inaccessible, and code outside package ir (clearly) uses only the interface.
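In sketch form, the new shape is roughly the following (the struct fields
are illustrative, not the real layout; Op, Left, and SetLeft are a few of
the actual accessors):

	package ir

	type Op int // stand-in here for the real ir.Op

	// Node is the exported interface. The unexported struct node is
	// its only implementation, so code outside ir can hold only the
	// interface.
	type Node interface {
		Op() Op
		Left() Node
		SetLeft(Node)
		// ... many more accessors ...
	}

	type node struct {
		op   Op
		left Node
		// ... the rest of the old giant struct ...
	}

	func (n *node) Op() Op         { return n.op }
	func (n *node) Left() Node     { return n.left }
	func (n *node) SetLeft(x Node) { n.left = x }

	var _ Node = (*node)(nil) // the check installed by the sed below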
The usual rule is never to redefine an existing name with a new meaning,
so that old code that hasn't been updated gets an "unknown name" error
instead of more mysterious errors or silent misbehavior. That rule would
caution against replacing Node-the-struct with Node-the-interface,
as in this CL, because code that says *Node would now be using a pointer
to an interface. But this CL is being landed at the same time as another that
moves Node from gc to ir. So the net effect is to replace *gc.Node with ir.Node,
which does follow the rule: any lingering references to gc.Node will be told
it's gone, not silently start using pointers to interfaces. So the rule is followed
by the CL sequence, just not by this specific CL.
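Concretely, with only this CL applied, a stale declaration such as

	var n *ir.Node // still compiles, but is now a pointer to an
	               // interface value, almost never what was meant

would slip through quietly, whereas once the companion CL lands, any
lingering *gc.Node is an undefined-name error.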
Overall, the loss of inlining caused by using interfaces cuts the compiler's
speed by about 6%, a non-trivial amount. However, as the representation is
converted to concrete structs smaller than the giant Node over the next few
weeks, that speed should come back: more of the compiler will operate
directly on concrete types, and the memory taken up by the graph of Nodes
will drop as the structs become more precise. Honestly, I was expecting worse.
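For intuition, here is a generic sketch of where the inlining loss comes
from; the types I, T, and f are made up for illustration, not compiler code:

	type I interface{ Op() int }
	type T struct{ op int }

	func (t *T) Op() int { return t.op }

	func f(t *T, i I) int {
		// t.Op() is a direct call the compiler inlines down to a
		// field load; i.Op() dispatches through the interface table
		// and, absent devirtualization, cannot be inlined.
		return t.Op() + i.Op()
	}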
% benchstat bench.old bench.new
name                       old time/op       new time/op       delta
Template                    168ms ± 4%        182ms ± 2%    +8.34%  (p=0.000 n=9+9)
Unicode                    72.2ms ±10%       82.5ms ± 6%   +14.38%  (p=0.000 n=9+9)
GoTypes                     563ms ± 8%        598ms ± 2%    +6.14%  (p=0.006 n=9+9)
Compiler                    2.89s ± 4%        3.04s ± 2%    +5.37%  (p=0.000 n=10+9)
SSA                         6.45s ± 4%        7.25s ± 5%   +12.41%  (p=0.000 n=9+10)
Flate                       105ms ± 2%        115ms ± 1%    +9.66%  (p=0.000 n=10+8)
GoParser                    144ms ±10%        152ms ± 2%    +5.79%  (p=0.011 n=9+8)
Reflect                     345ms ± 9%        370ms ± 4%    +7.28%  (p=0.001 n=10+9)
Tar                         149ms ± 9%        161ms ± 5%    +8.05%  (p=0.001 n=10+9)
XML                         190ms ± 3%        209ms ± 2%    +9.54%  (p=0.000 n=9+8)
LinkCompiler                327ms ± 2%        325ms ± 2%      ~     (p=0.382 n=8+8)
ExternalLinkCompiler        1.77s ± 4%        1.73s ± 6%      ~     (p=0.113 n=9+10)
LinkWithoutDebugCompiler    214ms ± 4%        211ms ± 2%      ~     (p=0.360 n=10+8)
StdCmd                      14.8s ± 3%        15.9s ± 1%    +6.98%  (p=0.000 n=10+9)
[Geo mean]                  480ms             510ms         +6.31%

name                       old user-time/op  new user-time/op  delta
Template                        223ms ± 3%        237ms ± 3%    +6.16%  (p=0.000 n=9+10)
Unicode                         103ms ± 6%        113ms ± 3%    +9.53%  (p=0.000 n=9+9)
GoTypes                         758ms ± 8%        800ms ± 2%    +5.55%  (p=0.003 n=10+9)
Compiler                        3.95s ± 2%        4.12s ± 2%    +4.34%  (p=0.000 n=10+9)
SSA                             9.43s ± 1%        9.74s ± 4%    +3.25%  (p=0.000 n=8+10)
Flate                           132ms ± 2%        141ms ± 2%    +6.89%  (p=0.000 n=9+9)
GoParser                        177ms ± 9%        183ms ± 4%      ~     (p=0.050 n=9+9)
Reflect                         467ms ±10%        495ms ± 7%    +6.17%  (p=0.029 n=10+10)
Tar                             183ms ± 9%        197ms ± 5%    +7.92%  (p=0.001 n=10+10)
XML                             249ms ± 5%        268ms ± 4%    +7.82%  (p=0.000 n=10+9)
LinkCompiler                    544ms ± 5%        544ms ± 6%      ~     (p=0.863 n=9+9)
ExternalLinkCompiler            1.79s ± 4%        1.75s ± 6%      ~     (p=0.075 n=10+10)
LinkWithoutDebugCompiler        248ms ± 6%        246ms ± 2%      ~     (p=0.965 n=10+8)
[Geo mean]                      483ms             504ms         +4.41%
[git-generate]
cd src/cmd/compile/internal/ir
: # We need to do the conversion in multiple steps, so we introduce
: # a temporary type alias that will start out meaning the pointer-to-struct
: # and then change to mean the interface.
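: # While the alias is in place, ir.Node and *ir.OldNode are the identical
: # type, so converted and unconverted code interoperate, e.g.:
: #	var n ir.Node = ...   // for now still means *OldNode
: #	var o *ir.OldNode = n // same type, no conversion needed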
rf '
	mv Node OldNode
	add node.go \
		type Node = *OldNode
'
: # It should work to run this ex in ir itself, but due to a bug in rf it
: # misses test files. Run the command in gc to handle gc's tests, and then
: # again in ssa for ssa's tests.
cd ../gc
rf '
	ex . ../arm ../riscv64 ../arm64 ../mips64 ../ppc64 ../mips ../wasm {
		import "cmd/compile/internal/ir"
		*ir.OldNode -> ir.Node
	}
'
cd ../ssa
rf '
	ex {
		import "cmd/compile/internal/ir"
		*ir.OldNode -> ir.Node
	}
'
: # Back in ir, finish conversion clumsily with sed,
: # because type checking and circular aliases do not mix.
cd ../ir
sed -i '' '
	/type Node = \*OldNode/d
	s/\*OldNode/Node/g
	s/^func (n Node)/func (n *OldNode)/
	s/OldNode/node/g
	s/type INode interface/type Node interface/
	s/var _ INode = (Node)(nil)/var _ Node = (*node)(nil)/
' *.go
gofmt -w *.go
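: # The pairs below are {type, 32-bit size, 64-bit size} in bytes. The
: # sizes grow because each *Node field (one word) becomes an ir.Node
: # interface value (two words).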
sed -i '' '
	s/{Func{}, 136, 248}/{Func{}, 152, 280}/
	s/{Name{}, 32, 56}/{Name{}, 44, 80}/
	s/{Param{}, 24, 48}/{Param{}, 44, 88}/
	s/{node{}, 76, 128}/{node{}, 88, 152}/
' sizeof_test.go
cd ../ssa
sed -i '' '
	s/{LocalSlot{}, 28, 40}/{LocalSlot{}, 32, 48}/
' sizeof_test.go
cd ../gc
sed -i '' 's/\*ir.Node/ir.Node/' mkbuiltin.go
cd ../../../..
go install std cmd
cd cmd/compile
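: # go test -u regenerates the knownFormats table in fmt_test.go (see the
: # diff below); presumably it is run twice because the first run fails
: # while rewriting the table.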
go test -u || go test -u
Change-Id: I196bbe3b648e4701662e4a2bada40bf155e2a553
Reviewed-on: https://go-review.googlesource.com/c/go/+/272935
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
var knownFormats = map[string]string{
"*bytes.Buffer %s": "",
"*cmd/compile/internal/gc.EscLocation %v": "",
- "*cmd/compile/internal/ir.Node %#v": "",
- "*cmd/compile/internal/ir.Node %+S": "",
- "*cmd/compile/internal/ir.Node %+v": "",
- "*cmd/compile/internal/ir.Node %L": "",
- "*cmd/compile/internal/ir.Node %S": "",
- "*cmd/compile/internal/ir.Node %j": "",
- "*cmd/compile/internal/ir.Node %p": "",
- "*cmd/compile/internal/ir.Node %v": "",
+ "*cmd/compile/internal/ir.node %v": "",
"*cmd/compile/internal/ssa.Block %s": "",
"*cmd/compile/internal/ssa.Block %v": "",
"*cmd/compile/internal/ssa.Func %s": "",
"cmd/compile/internal/ir.Class %d": "",
"cmd/compile/internal/ir.Class %v": "",
"cmd/compile/internal/ir.FmtMode %d": "",
+ "cmd/compile/internal/ir.Node %#v": "",
+ "cmd/compile/internal/ir.Node %+S": "",
+ "cmd/compile/internal/ir.Node %+v": "",
+ "cmd/compile/internal/ir.Node %L": "",
+ "cmd/compile/internal/ir.Node %S": "",
+ "cmd/compile/internal/ir.Node %j": "",
+ "cmd/compile/internal/ir.Node %p": "",
+ "cmd/compile/internal/ir.Node %v": "",
"cmd/compile/internal/ir.Nodes %#v": "",
"cmd/compile/internal/ir.Nodes %+v": "",
"cmd/compile/internal/ir.Nodes %.v": "",
"interface{} %q": "",
"interface{} %s": "",
"interface{} %v": "",
- "map[*cmd/compile/internal/ir.Node]*cmd/compile/internal/ssa.Value %v": "",
- "map[*cmd/compile/internal/ir.Node][]*cmd/compile/internal/ir.Node %v": "",
- "map[cmd/compile/internal/ssa.ID]uint32 %v": "",
+ "map[cmd/compile/internal/ir.Node]*cmd/compile/internal/ssa.Value %v": "",
+ "map[cmd/compile/internal/ir.Node][]cmd/compile/internal/ir.Node %v": "",
+ "map[cmd/compile/internal/ssa.ID]uint32 %v": "",
"map[int64]uint32 %v": "",
"math/big.Accuracy %s": "",
"reflect.Type %s": "",
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *ir.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *ir.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
return closure
}
-func hashfor(t *types.Type) *ir.Node {
+func hashfor(t *types.Type) ir.Node {
var sym *types.Sym
switch a, _ := algtype1(t); a {
n := NewName(sym)
setNodeNameFunc(n)
- n.SetType(functype(nil, []*ir.Node{
+ n.SetType(functype(nil, []ir.Node{
anonfield(types.NewPtr(t)),
anonfield(types.Types[types.TUINTPTR]),
- }, []*ir.Node{
+ }, []ir.Node{
anonfield(types.Types[types.TUINTPTR]),
}))
return n
//
// TODO(josharian): consider doing some loop unrolling
// for larger nelem as well, processing a few elements at a time in a loop.
- checkAll := func(unroll int64, last bool, eq func(pi, qi *ir.Node) *ir.Node) {
+ checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
// checkIdx generates a node to check for equality at index i.
- checkIdx := func(i *ir.Node) *ir.Node {
+ checkIdx := func(i ir.Node) ir.Node {
// pi := p[i]
pi := ir.Nod(ir.OINDEX, np, i)
pi.SetBounded(true)
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
// TODO: when the array size is small, unroll the length match checks.
- checkAll(3, false, func(pi, qi *ir.Node) *ir.Node {
+ checkAll(3, false, func(pi, qi ir.Node) ir.Node {
// Compare lengths.
eqlen, _ := eqstring(pi, qi)
return eqlen
})
- checkAll(1, true, func(pi, qi *ir.Node) *ir.Node {
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// Compare contents.
_, eqmem := eqstring(pi, qi)
return eqmem
})
case types.TFLOAT32, types.TFLOAT64:
- checkAll(2, true, func(pi, qi *ir.Node) *ir.Node {
+ checkAll(2, true, func(pi, qi ir.Node) ir.Node {
// p[i] == q[i]
return ir.Nod(ir.OEQ, pi, qi)
})
// TODO: pick apart structs, do them piecemeal too
default:
- checkAll(1, true, func(pi, qi *ir.Node) *ir.Node {
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// p[i] == q[i]
return ir.Nod(ir.OEQ, pi, qi)
})
// Build a list of conditions to satisfy.
// The conditions are a list-of-lists. Conditions are reorderable
// within each inner list. The outer lists must be evaluated in order.
- var conds [][]*ir.Node
- conds = append(conds, []*ir.Node{})
- and := func(n *ir.Node) {
+ var conds [][]ir.Node
+ conds = append(conds, []ir.Node{})
+ and := func(n ir.Node) {
i := len(conds) - 1
conds[i] = append(conds[i], n)
}
if !IsRegularMemory(f.Type) {
if EqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions.
- conds = append(conds, []*ir.Node{})
+ conds = append(conds, []ir.Node{})
}
p := nodSym(ir.OXDOT, np, f.Sym)
q := nodSym(ir.OXDOT, nq, f.Sym)
}
if EqCanPanic(f.Type) {
// Also enforce ordering after something that can panic.
- conds = append(conds, []*ir.Node{})
+ conds = append(conds, []ir.Node{})
}
i++
continue
// Sort conditions to put runtime calls last.
// Preserve the rest of the ordering.
- var flatConds []*ir.Node
+ var flatConds []ir.Node
for _, c := range conds {
- isCall := func(n *ir.Node) bool {
+ isCall := func(n ir.Node) bool {
return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
}
sort.SliceStable(c, func(i, j int) bool {
return closure
}
-func hasCall(n *ir.Node) bool {
+func hasCall(n ir.Node) bool {
if n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC {
return true
}
// eqfield returns the node
// p.field == q.field
-func eqfield(p *ir.Node, q *ir.Node, field *types.Sym) *ir.Node {
+func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
nx := nodSym(ir.OXDOT, p, field)
ny := nodSym(ir.OXDOT, q, field)
ne := ir.Nod(ir.OEQ, nx, ny)
// memequal(s.ptr, t.ptr, len(s))
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
-func eqstring(s, t *ir.Node) (eqlen, eqmem *ir.Node) {
+func eqstring(s, t ir.Node) (eqlen, eqmem ir.Node) {
s = conv(s, types.Types[types.TSTRING])
t = conv(t, types.Types[types.TSTRING])
sptr := ir.Nod(ir.OSPTR, s, nil)
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
-func eqinterface(s, t *ir.Node) (eqtab, eqdata *ir.Node) {
+func eqinterface(s, t ir.Node) (eqtab, eqdata ir.Node) {
if !types.Identical(s.Type(), t.Type()) {
base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
}
// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
- var fn *ir.Node
+ var fn ir.Node
if s.Type().IsEmptyInterface() {
fn = syslook("efaceeq")
} else {
// eqmem returns the node
// memequal(&p.field, &q.field [, size])
-func eqmem(p *ir.Node, q *ir.Node, field *types.Sym, size int64) *ir.Node {
+func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
nx := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, p, field), nil)
ny := ir.Nod(ir.OADDR, nodSym(ir.OXDOT, q, field), nil)
nx = typecheck(nx, ctxExpr)
return call
}
-func eqmemfunc(size int64, t *types.Type) (fn *ir.Node, needsize bool) {
+func eqmemfunc(size int64, t *types.Type) (fn ir.Node, needsize bool) {
switch size {
default:
fn = syslook("memequal")
}
// markObject visits a reachable object.
-func (p *exporter) markObject(n *ir.Node) {
+func (p *exporter) markObject(n ir.Node) {
if n.Op() == ir.ONAME && n.Class() == ir.PFUNC {
inlFlood(n)
}
"cmd/internal/src"
)
-func npos(pos src.XPos, n *ir.Node) *ir.Node {
+func npos(pos src.XPos, n ir.Node) ir.Node {
n.SetPos(pos)
return n
}
-func builtinCall(op ir.Op) *ir.Node {
+func builtinCall(op ir.Op) ir.Node {
return ir.Nod(ir.OCALL, mkname(ir.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
}
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[types.TANY]
typs[3] = types.NewPtr(typs[2])
- typs[4] = functype(nil, []*ir.Node{anonfield(typs[1])}, []*ir.Node{anonfield(typs[3])})
+ typs[4] = functype(nil, []ir.Node{anonfield(typs[1])}, []ir.Node{anonfield(typs[3])})
typs[5] = types.Types[types.TUINTPTR]
typs[6] = types.Types[types.TBOOL]
typs[7] = types.Types[types.TUNSAFEPTR]
- typs[8] = functype(nil, []*ir.Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*ir.Node{anonfield(typs[7])})
+ typs[8] = functype(nil, []ir.Node{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []ir.Node{anonfield(typs[7])})
typs[9] = functype(nil, nil, nil)
typs[10] = types.Types[types.TINTER]
- typs[11] = functype(nil, []*ir.Node{anonfield(typs[10])}, nil)
+ typs[11] = functype(nil, []ir.Node{anonfield(typs[10])}, nil)
typs[12] = types.Types[types.TINT32]
typs[13] = types.NewPtr(typs[12])
- typs[14] = functype(nil, []*ir.Node{anonfield(typs[13])}, []*ir.Node{anonfield(typs[10])})
+ typs[14] = functype(nil, []ir.Node{anonfield(typs[13])}, []ir.Node{anonfield(typs[10])})
typs[15] = types.Types[types.TINT]
- typs[16] = functype(nil, []*ir.Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
+ typs[16] = functype(nil, []ir.Node{anonfield(typs[15]), anonfield(typs[15])}, nil)
typs[17] = types.Types[types.TUINT]
- typs[18] = functype(nil, []*ir.Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
- typs[19] = functype(nil, []*ir.Node{anonfield(typs[6])}, nil)
+ typs[18] = functype(nil, []ir.Node{anonfield(typs[17]), anonfield(typs[15])}, nil)
+ typs[19] = functype(nil, []ir.Node{anonfield(typs[6])}, nil)
typs[20] = types.Types[types.TFLOAT64]
- typs[21] = functype(nil, []*ir.Node{anonfield(typs[20])}, nil)
+ typs[21] = functype(nil, []ir.Node{anonfield(typs[20])}, nil)
typs[22] = types.Types[types.TINT64]
- typs[23] = functype(nil, []*ir.Node{anonfield(typs[22])}, nil)
+ typs[23] = functype(nil, []ir.Node{anonfield(typs[22])}, nil)
typs[24] = types.Types[types.TUINT64]
- typs[25] = functype(nil, []*ir.Node{anonfield(typs[24])}, nil)
+ typs[25] = functype(nil, []ir.Node{anonfield(typs[24])}, nil)
typs[26] = types.Types[types.TCOMPLEX128]
- typs[27] = functype(nil, []*ir.Node{anonfield(typs[26])}, nil)
+ typs[27] = functype(nil, []ir.Node{anonfield(typs[26])}, nil)
typs[28] = types.Types[types.TSTRING]
- typs[29] = functype(nil, []*ir.Node{anonfield(typs[28])}, nil)
- typs[30] = functype(nil, []*ir.Node{anonfield(typs[2])}, nil)
- typs[31] = functype(nil, []*ir.Node{anonfield(typs[5])}, nil)
+ typs[29] = functype(nil, []ir.Node{anonfield(typs[28])}, nil)
+ typs[30] = functype(nil, []ir.Node{anonfield(typs[2])}, nil)
+ typs[31] = functype(nil, []ir.Node{anonfield(typs[5])}, nil)
typs[32] = types.NewArray(typs[0], 32)
typs[33] = types.NewPtr(typs[32])
- typs[34] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])})
- typs[35] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])})
- typs[36] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])})
- typs[37] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[28])})
+ typs[34] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
+ typs[35] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
+ typs[36] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
+ typs[37] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[28])})
typs[38] = types.NewSlice(typs[28])
- typs[39] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[38])}, []*ir.Node{anonfield(typs[28])})
- typs[40] = functype(nil, []*ir.Node{anonfield(typs[28]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[15])})
+ typs[39] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[38])}, []ir.Node{anonfield(typs[28])})
+ typs[40] = functype(nil, []ir.Node{anonfield(typs[28]), anonfield(typs[28])}, []ir.Node{anonfield(typs[15])})
typs[41] = types.NewArray(typs[0], 4)
typs[42] = types.NewPtr(typs[41])
- typs[43] = functype(nil, []*ir.Node{anonfield(typs[42]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[28])})
- typs[44] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[28])})
- typs[45] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[28])})
+ typs[43] = functype(nil, []ir.Node{anonfield(typs[42]), anonfield(typs[22])}, []ir.Node{anonfield(typs[28])})
+ typs[44] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[28])})
+ typs[45] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[28])})
typs[46] = types.Runetype
typs[47] = types.NewSlice(typs[46])
- typs[48] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[47])}, []*ir.Node{anonfield(typs[28])})
+ typs[48] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[47])}, []ir.Node{anonfield(typs[28])})
typs[49] = types.NewSlice(typs[0])
- typs[50] = functype(nil, []*ir.Node{anonfield(typs[33]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[49])})
+ typs[50] = functype(nil, []ir.Node{anonfield(typs[33]), anonfield(typs[28])}, []ir.Node{anonfield(typs[49])})
typs[51] = types.NewArray(typs[46], 32)
typs[52] = types.NewPtr(typs[51])
- typs[53] = functype(nil, []*ir.Node{anonfield(typs[52]), anonfield(typs[28])}, []*ir.Node{anonfield(typs[47])})
- typs[54] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[15])})
- typs[55] = functype(nil, []*ir.Node{anonfield(typs[28]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[46]), anonfield(typs[15])})
- typs[56] = functype(nil, []*ir.Node{anonfield(typs[28])}, []*ir.Node{anonfield(typs[15])})
- typs[57] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[2])})
- typs[58] = functype(nil, []*ir.Node{anonfield(typs[2])}, []*ir.Node{anonfield(typs[7])})
- typs[59] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[2])})
- typs[60] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[2]), anonfield(typs[6])})
- typs[61] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
- typs[62] = functype(nil, []*ir.Node{anonfield(typs[1])}, nil)
+ typs[53] = functype(nil, []ir.Node{anonfield(typs[52]), anonfield(typs[28])}, []ir.Node{anonfield(typs[47])})
+ typs[54] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []ir.Node{anonfield(typs[15])})
+ typs[55] = functype(nil, []ir.Node{anonfield(typs[28]), anonfield(typs[15])}, []ir.Node{anonfield(typs[46]), anonfield(typs[15])})
+ typs[56] = functype(nil, []ir.Node{anonfield(typs[28])}, []ir.Node{anonfield(typs[15])})
+ typs[57] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []ir.Node{anonfield(typs[2])})
+ typs[58] = functype(nil, []ir.Node{anonfield(typs[2])}, []ir.Node{anonfield(typs[7])})
+ typs[59] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3])}, []ir.Node{anonfield(typs[2])})
+ typs[60] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[2])}, []ir.Node{anonfield(typs[2]), anonfield(typs[6])})
+ typs[61] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
+ typs[62] = functype(nil, []ir.Node{anonfield(typs[1])}, nil)
typs[63] = types.NewPtr(typs[5])
- typs[64] = functype(nil, []*ir.Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*ir.Node{anonfield(typs[6])})
+ typs[64] = functype(nil, []ir.Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []ir.Node{anonfield(typs[6])})
typs[65] = types.Types[types.TUINT32]
- typs[66] = functype(nil, nil, []*ir.Node{anonfield(typs[65])})
+ typs[66] = functype(nil, nil, []ir.Node{anonfield(typs[65])})
typs[67] = types.NewMap(typs[2], typs[2])
- typs[68] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[67])})
- typs[69] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[67])})
- typs[70] = functype(nil, nil, []*ir.Node{anonfield(typs[67])})
- typs[71] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[3])})
- typs[72] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[3])})
- typs[73] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Node{anonfield(typs[3])})
- typs[74] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[75] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[76] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Node{anonfield(typs[3]), anonfield(typs[6])})
- typs[77] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
- typs[78] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
- typs[79] = functype(nil, []*ir.Node{anonfield(typs[3])}, nil)
- typs[80] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
+ typs[68] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []ir.Node{anonfield(typs[67])})
+ typs[69] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []ir.Node{anonfield(typs[67])})
+ typs[70] = functype(nil, nil, []ir.Node{anonfield(typs[67])})
+ typs[71] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []ir.Node{anonfield(typs[3])})
+ typs[72] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []ir.Node{anonfield(typs[3])})
+ typs[73] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []ir.Node{anonfield(typs[3])})
+ typs[74] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[75] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[76] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []ir.Node{anonfield(typs[3]), anonfield(typs[6])})
+ typs[77] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
+ typs[78] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
+ typs[79] = functype(nil, []ir.Node{anonfield(typs[3])}, nil)
+ typs[80] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
typs[81] = types.NewChan(typs[2], types.Cboth)
- typs[82] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[81])})
- typs[83] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[81])})
+ typs[82] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22])}, []ir.Node{anonfield(typs[81])})
+ typs[83] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15])}, []ir.Node{anonfield(typs[81])})
typs[84] = types.NewChan(typs[2], types.Crecv)
- typs[85] = functype(nil, []*ir.Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
- typs[86] = functype(nil, []*ir.Node{anonfield(typs[84]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[6])})
+ typs[85] = functype(nil, []ir.Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
+ typs[86] = functype(nil, []ir.Node{anonfield(typs[84]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])})
typs[87] = types.NewChan(typs[2], types.Csend)
- typs[88] = functype(nil, []*ir.Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
+ typs[88] = functype(nil, []ir.Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
typs[89] = types.NewArray(typs[0], 3)
- typs[90] = tostruct([]*ir.Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
- typs[91] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
- typs[92] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
- typs[93] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[15])})
- typs[94] = functype(nil, []*ir.Node{anonfield(typs[87]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[6])})
- typs[95] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[84])}, []*ir.Node{anonfield(typs[6])})
+ typs[90] = tostruct([]ir.Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
+ typs[91] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
+ typs[92] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
+ typs[93] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []ir.Node{anonfield(typs[15])})
+ typs[94] = functype(nil, []ir.Node{anonfield(typs[87]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])})
+ typs[95] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[84])}, []ir.Node{anonfield(typs[6])})
typs[96] = types.NewPtr(typs[6])
- typs[97] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*ir.Node{anonfield(typs[6])})
- typs[98] = functype(nil, []*ir.Node{anonfield(typs[63])}, nil)
- typs[99] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*ir.Node{anonfield(typs[15]), anonfield(typs[6])})
- typs[100] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[7])})
- typs[101] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[7])})
- typs[102] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*ir.Node{anonfield(typs[7])})
+ typs[97] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []ir.Node{anonfield(typs[6])})
+ typs[98] = functype(nil, []ir.Node{anonfield(typs[63])}, nil)
+ typs[99] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []ir.Node{anonfield(typs[15]), anonfield(typs[6])})
+ typs[100] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []ir.Node{anonfield(typs[7])})
+ typs[101] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []ir.Node{anonfield(typs[7])})
+ typs[102] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []ir.Node{anonfield(typs[7])})
typs[103] = types.NewSlice(typs[2])
- typs[104] = functype(nil, []*ir.Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*ir.Node{anonfield(typs[103])})
- typs[105] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
- typs[106] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
- typs[107] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[6])})
- typs[108] = functype(nil, []*ir.Node{anonfield(typs[3]), anonfield(typs[3])}, []*ir.Node{anonfield(typs[6])})
- typs[109] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[7])}, []*ir.Node{anonfield(typs[6])})
- typs[110] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[5])})
- typs[111] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[5])}, []*ir.Node{anonfield(typs[5])})
- typs[112] = functype(nil, []*ir.Node{anonfield(typs[22]), anonfield(typs[22])}, []*ir.Node{anonfield(typs[22])})
- typs[113] = functype(nil, []*ir.Node{anonfield(typs[24]), anonfield(typs[24])}, []*ir.Node{anonfield(typs[24])})
- typs[114] = functype(nil, []*ir.Node{anonfield(typs[20])}, []*ir.Node{anonfield(typs[22])})
- typs[115] = functype(nil, []*ir.Node{anonfield(typs[20])}, []*ir.Node{anonfield(typs[24])})
- typs[116] = functype(nil, []*ir.Node{anonfield(typs[20])}, []*ir.Node{anonfield(typs[65])})
- typs[117] = functype(nil, []*ir.Node{anonfield(typs[22])}, []*ir.Node{anonfield(typs[20])})
- typs[118] = functype(nil, []*ir.Node{anonfield(typs[24])}, []*ir.Node{anonfield(typs[20])})
- typs[119] = functype(nil, []*ir.Node{anonfield(typs[65])}, []*ir.Node{anonfield(typs[20])})
- typs[120] = functype(nil, []*ir.Node{anonfield(typs[26]), anonfield(typs[26])}, []*ir.Node{anonfield(typs[26])})
- typs[121] = functype(nil, []*ir.Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
- typs[122] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
+ typs[104] = functype(nil, []ir.Node{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []ir.Node{anonfield(typs[103])})
+ typs[105] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
+ typs[106] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5])}, nil)
+ typs[107] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []ir.Node{anonfield(typs[6])})
+ typs[108] = functype(nil, []ir.Node{anonfield(typs[3]), anonfield(typs[3])}, []ir.Node{anonfield(typs[6])})
+ typs[109] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[7])}, []ir.Node{anonfield(typs[6])})
+ typs[110] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []ir.Node{anonfield(typs[5])})
+ typs[111] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[5])}, []ir.Node{anonfield(typs[5])})
+ typs[112] = functype(nil, []ir.Node{anonfield(typs[22]), anonfield(typs[22])}, []ir.Node{anonfield(typs[22])})
+ typs[113] = functype(nil, []ir.Node{anonfield(typs[24]), anonfield(typs[24])}, []ir.Node{anonfield(typs[24])})
+ typs[114] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[22])})
+ typs[115] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[24])})
+ typs[116] = functype(nil, []ir.Node{anonfield(typs[20])}, []ir.Node{anonfield(typs[65])})
+ typs[117] = functype(nil, []ir.Node{anonfield(typs[22])}, []ir.Node{anonfield(typs[20])})
+ typs[118] = functype(nil, []ir.Node{anonfield(typs[24])}, []ir.Node{anonfield(typs[20])})
+ typs[119] = functype(nil, []ir.Node{anonfield(typs[65])}, []ir.Node{anonfield(typs[20])})
+ typs[120] = functype(nil, []ir.Node{anonfield(typs[26]), anonfield(typs[26])}, []ir.Node{anonfield(typs[26])})
+ typs[121] = functype(nil, []ir.Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
+ typs[122] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[123] = types.NewSlice(typs[7])
- typs[124] = functype(nil, []*ir.Node{anonfield(typs[7]), anonfield(typs[123])}, nil)
+ typs[124] = functype(nil, []ir.Node{anonfield(typs[7]), anonfield(typs[123])}, nil)
typs[125] = types.Types[types.TUINT8]
- typs[126] = functype(nil, []*ir.Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
+ typs[126] = functype(nil, []ir.Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
typs[127] = types.Types[types.TUINT16]
- typs[128] = functype(nil, []*ir.Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
- typs[129] = functype(nil, []*ir.Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
- typs[130] = functype(nil, []*ir.Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
+ typs[128] = functype(nil, []ir.Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
+ typs[129] = functype(nil, []ir.Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
+ typs[130] = functype(nil, []ir.Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}
"fmt"
)
-func (p *noder) funcLit(expr *syntax.FuncLit) *ir.Node {
+func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
xtype := p.typeExpr(expr.Type)
ntype := p.typeExpr(expr.Type)
// function associated with the closure.
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
-func typecheckclosure(clo *ir.Node, top int) {
+func typecheckclosure(clo ir.Node, top int) {
fn := clo.Func()
dcl := fn.Decl
// Set current associated iota value, so iota can be used inside
// closurename generates a new unique name for a closure within
// outerfunc.
-func closurename(outerfunc *ir.Node) *types.Sym {
+func closurename(outerfunc ir.Node) *types.Sym {
outer := "glob."
prefix := "func"
gen := &globClosgen
// by value or by reference.
// We use value capturing for values <= 128 bytes that are never reassigned
// after capturing (effectively constant).
-func capturevars(dcl *ir.Node) {
+func capturevars(dcl ir.Node) {
lno := base.Pos
base.Pos = dcl.Pos()
fn := dcl.Func()
// transformclosure is called in a separate phase after escape analysis.
// It transform closure bodies to properly reference captured variables.
-func transformclosure(dcl *ir.Node) {
+func transformclosure(dcl ir.Node) {
lno := base.Pos
base.Pos = dcl.Pos()
fn := dcl.Func()
// We are going to insert captured variables before input args.
var params []*types.Field
- var decls []*ir.Node
+ var decls []ir.Node
for _, v := range fn.ClosureVars.Slice() {
if !v.Name().Byval() {
// If v of type T is captured by reference,
dcl.SetType(f.Type()) // update type of ODCLFUNC
} else {
// The closure is not called, so it is going to stay as closure.
- var body []*ir.Node
+ var body []ir.Node
offset := int64(Widthptr)
for _, v := range fn.ClosureVars.Slice() {
// cv refers to the field inside of closure OSTRUCTLIT.
// hasemptycvars reports whether closure clo has an
// empty list of captured vars.
-func hasemptycvars(clo *ir.Node) bool {
+func hasemptycvars(clo ir.Node) bool {
return clo.Func().ClosureVars.Len() == 0
}
// closuredebugruntimecheck applies boilerplate checks for debug flags
// and compiling runtime
-func closuredebugruntimecheck(clo *ir.Node) {
+func closuredebugruntimecheck(clo ir.Node) {
if base.Debug.Closure > 0 {
if clo.Esc() == EscHeap {
base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func().ClosureVars)
// closureType returns the struct type used to hold all the information
// needed in the closure for clo (clo must be a OCLOSURE node).
// The address of a variable of the returned type can be cast to a func.
-func closureType(clo *ir.Node) *types.Type {
+func closureType(clo ir.Node) *types.Type {
// Create closure in the form of a composite literal.
// supposing the closure captures an int i and a string s
// and has one float64 argument and no results,
// The information appears in the binary in the form of type descriptors;
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
- fields := []*ir.Node{
+ fields := []ir.Node{
namedfield(".F", types.Types[types.TUINTPTR]),
}
for _, v := range clo.Func().ClosureVars.Slice() {
return typ
}
-func walkclosure(clo *ir.Node, init *ir.Nodes) *ir.Node {
+func walkclosure(clo ir.Node, init *ir.Nodes) ir.Node {
fn := clo.Func()
// If no closure vars, don't bother wrapping.
clos := ir.Nod(ir.OCOMPLIT, nil, typenod(typ))
clos.SetEsc(clo.Esc())
- clos.PtrList().Set(append([]*ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
+ clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
clos = ir.Nod(ir.OADDR, clos, nil)
clos.SetEsc(clo.Esc())
return walkexpr(clos, init)
}
-func typecheckpartialcall(dot *ir.Node, sym *types.Sym) {
+func typecheckpartialcall(dot ir.Node, sym *types.Sym) {
switch dot.Op() {
case ir.ODOTINTER, ir.ODOTMETH:
break
// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
-func makepartialcall(dot *ir.Node, t0 *types.Type, meth *types.Sym) *ir.Node {
+func makepartialcall(dot ir.Node, t0 *types.Type, meth *types.Sym) ir.Node {
rcvrtype := dot.Left().Type()
sym := methodSymSuffix(rcvrtype, meth, "-fm")
ptr := NewName(lookup(".this"))
declare(ptr, ir.PAUTO)
ptr.Name().SetUsed(true)
- var body []*ir.Node
+ var body []ir.Node
if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
ptr.SetType(rcvrtype)
body = append(body, ir.Nod(ir.OAS, ptr, cv))
// partialCallType returns the struct type used to hold all the information
// needed in the closure for n (n must be a OCALLPART node).
// The address of a variable of the returned type can be cast to a func.
-func partialCallType(n *ir.Node) *types.Type {
- t := tostruct([]*ir.Node{
+func partialCallType(n ir.Node) *types.Type {
+ t := tostruct([]ir.Node{
namedfield("F", types.Types[types.TUINTPTR]),
namedfield("R", n.Left().Type()),
})
return t
}
-func walkpartialcall(n *ir.Node, init *ir.Nodes) *ir.Node {
+func walkpartialcall(n ir.Node, init *ir.Nodes) ir.Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
// callpartMethod returns the *types.Field representing the method
// referenced by method value n.
-func callpartMethod(n *ir.Node) *types.Field {
+func callpartMethod(n ir.Node) *types.Field {
if n.Op() != ir.OCALLPART {
base.Fatalf("expected OCALLPART, got %v", n)
}
}
// TODO(mdempsky): Replace these with better APIs.
-func convlit(n *ir.Node, t *types.Type) *ir.Node { return convlit1(n, t, false, nil) }
-func defaultlit(n *ir.Node, t *types.Type) *ir.Node { return convlit1(n, t, false, nil) }
+func convlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
+func defaultlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
// convlit1 converts an untyped expression n to type t. If n already
// has a type, convlit1 has no effect.
//
// If there's an error converting n to t, context is used in the error
// message.
-func convlit1(n *ir.Node, t *types.Type, explicit bool, context func() string) *ir.Node {
+func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir.Node {
if explicit && t == nil {
base.Fatalf("explicit conversion missing type")
}
// If n is not a constant, evalConst returns n.
// Otherwise, evalConst returns a new OLITERAL with the same value as n,
// and with .Orig pointing back to n.
-func evalConst(n *ir.Node) *ir.Node {
+func evalConst(n ir.Node) ir.Node {
nl, nr := n.Left(), n.Right()
// Pick off just the opcodes that can be constant evaluated.
}
return origConst(n, constant.MakeString(strings.Join(strs, "")))
}
- newList := make([]*ir.Node, 0, need)
+ newList := make([]ir.Node, 0, need)
for i := 0; i < len(s); i++ {
if ir.IsConst(s[i], constant.String) && i+1 < len(s) && ir.IsConst(s[i+1], constant.String) {
// merge from i up to but not including i2
}
// origConst returns an OLITERAL with orig n and value v.
-func origConst(n *ir.Node, v constant.Value) *ir.Node {
+func origConst(n ir.Node, v constant.Value) ir.Node {
lno := setlineno(n)
v = convertVal(v, n.Type(), false)
base.Pos = lno
return n
}
-func origBoolConst(n *ir.Node, v bool) *ir.Node {
+func origBoolConst(n ir.Node, v bool) ir.Node {
return origConst(n, constant.MakeBool(v))
}
-func origIntConst(n *ir.Node, v int64) *ir.Node {
+func origIntConst(n ir.Node, v int64) ir.Node {
return origConst(n, constant.MakeInt64(v))
}
// force means must assign concrete (non-ideal) type.
// The results of defaultlit2 MUST be assigned back to l and r, e.g.
// n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
-func defaultlit2(l *ir.Node, r *ir.Node, force bool) (*ir.Node, *ir.Node) {
+func defaultlit2(l ir.Node, r ir.Node, force bool) (ir.Node, ir.Node) {
if l.Type() == nil || r.Type() == nil {
return l, r
}
return nil
}
-func smallintconst(n *ir.Node) bool {
+func smallintconst(n ir.Node) bool {
if n.Op() == ir.OLITERAL {
v, ok := constant.Int64Val(n.Val())
return ok && int64(int32(v)) == v
// If n is not a constant expression, not representable as an
// integer, or negative, it returns -1. If n is too large, it
// returns -2.
-func indexconst(n *ir.Node) int64 {
+func indexconst(n ir.Node) int64 {
if n.Op() != ir.OLITERAL {
return -1
}
//
// Expressions derived from nil, like string([]byte(nil)), while they
// may be known at compile time, are not Go language constants.
-func isGoConst(n *ir.Node) bool {
+func isGoConst(n ir.Node) bool {
return n.Op() == ir.OLITERAL
}
-func hascallchan(n *ir.Node) bool {
+func hascallchan(n ir.Node) bool {
if n == nil {
return false
}
// where are used in the error message.
//
// n must not be an untyped constant.
-func (s *constSet) add(pos src.XPos, n *ir.Node, what, where string) {
+func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) {
if n.Op() == ir.OCONVIFACE && n.Implicit() {
n = n.Left()
}
// the latter is non-obvious.
//
// TODO(mdempsky): This could probably be a fmt.go flag.
-func nodeAndVal(n *ir.Node) string {
+func nodeAndVal(n ir.Node) string {
show := n.String()
val := ir.ConstValue(n)
if s := fmt.Sprintf("%#v", val); show != s {
// Declaration stack & operations
-var externdcl []*ir.Node
+var externdcl []ir.Node
func testdclstack() {
if !types.IsDclstackValid() {
// declare records that Node n declares symbol n.Sym in the specified
// declaration context.
-func declare(n *ir.Node, ctxt ir.Class) {
+func declare(n ir.Node, ctxt ir.Class) {
if ir.IsBlank(n) {
return
}
autoexport(n, ctxt)
}
-func addvar(n *ir.Node, t *types.Type, ctxt ir.Class) {
+func addvar(n ir.Node, t *types.Type, ctxt ir.Class) {
if n == nil || n.Sym() == nil || (n.Op() != ir.ONAME && n.Op() != ir.ONONAME) || t == nil {
base.Fatalf("addvar: n=%v t=%v nil", n, t)
}
// declare variables from grammar
// new_name_list (type | [type] = expr_list)
-func variter(vl []*ir.Node, t *ir.Node, el []*ir.Node) []*ir.Node {
- var init []*ir.Node
+func variter(vl []ir.Node, t ir.Node, el []ir.Node) []ir.Node {
+ var init []ir.Node
doexpr := len(el) > 0
if len(el) == 1 && len(vl) > 1 {
nel := len(el)
for _, v := range vl {
- var e *ir.Node
+ var e ir.Node
if doexpr {
if len(el) == 0 {
base.Errorf("assignment mismatch: %d variables but %d values", len(vl), nel)
}
// newnoname returns a new ONONAME Node associated with symbol s.
-func newnoname(s *types.Sym) *ir.Node {
+func newnoname(s *types.Sym) ir.Node {
if s == nil {
base.Fatalf("newnoname nil")
}
}
// newfuncnamel generates a new name node for a function or method.
-func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) *ir.Node {
+func newfuncnamel(pos src.XPos, s *types.Sym, fn *ir.Func) ir.Node {
if fn.Nname != nil {
base.Fatalf("newfuncnamel - already have name")
}
// this generates a new name node for a name
// being declared.
-func dclname(s *types.Sym) *ir.Node {
+func dclname(s *types.Sym) ir.Node {
n := NewName(s)
n.SetOp(ir.ONONAME) // caller will correct it
return n
}
-func typenod(t *types.Type) *ir.Node {
+func typenod(t *types.Type) ir.Node {
return typenodl(src.NoXPos, t)
}
-func typenodl(pos src.XPos, t *types.Type) *ir.Node {
+func typenodl(pos src.XPos, t *types.Type) ir.Node {
// if we copied another type with *t = *u
// then t->nod might be out of date, so
// check t->nod->type too
return ir.AsNode(t.Nod)
}
-func anonfield(typ *types.Type) *ir.Node {
+func anonfield(typ *types.Type) ir.Node {
return symfield(nil, typ)
}
-func namedfield(s string, typ *types.Type) *ir.Node {
+func namedfield(s string, typ *types.Type) ir.Node {
return symfield(lookup(s), typ)
}
-func symfield(s *types.Sym, typ *types.Type) *ir.Node {
+func symfield(s *types.Sym, typ *types.Type) ir.Node {
n := nodSym(ir.ODCLFIELD, nil, s)
n.SetType(typ)
return n
// If no such Node currently exists, an ONONAME Node is returned instead.
// Automatically creates a new closure variable if the referenced symbol was
// declared in a different (containing) function.
-func oldname(s *types.Sym) *ir.Node {
+func oldname(s *types.Sym) ir.Node {
n := ir.AsNode(s.Def)
if n == nil {
// Maybe a top-level declaration will come along later to
}
// importName is like oldname, but it reports an error if sym is from another package and not exported.
-func importName(sym *types.Sym) *ir.Node {
+func importName(sym *types.Sym) ir.Node {
n := oldname(sym)
if !types.IsExported(sym.Name) && sym.Pkg != ir.LocalPkg {
n.SetDiag(true)
}
// := declarations
-func colasname(n *ir.Node) bool {
+func colasname(n ir.Node) bool {
switch n.Op() {
case ir.ONAME,
ir.ONONAME,
return false
}
-func colasdefn(left []*ir.Node, defn *ir.Node) {
+func colasdefn(left []ir.Node, defn ir.Node) {
for _, n := range left {
if n.Sym() != nil {
n.Sym().SetUniq(true)
// declare the arguments in an
// interface field declaration.
-func ifacedcl(n *ir.Node) {
+func ifacedcl(n ir.Node) {
if n.Op() != ir.ODCLFIELD || n.Left() == nil {
base.Fatalf("ifacedcl")
}
// and declare the arguments.
// called in extern-declaration context
// returns in auto-declaration context.
-func funchdr(n *ir.Node) {
+func funchdr(n ir.Node) {
// change the declaration context from extern to auto
funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext})
Curfn = n
}
}
-func funcargs(nt *ir.Node) {
+func funcargs(nt ir.Node) {
if nt.Op() != ir.OTFUNC {
base.Fatalf("funcargs %v", nt.Op())
}
vargen = oldvargen
}
-func funcarg(n *ir.Node, ctxt ir.Class) {
+func funcarg(n ir.Node, ctxt ir.Class) {
if n.Op() != ir.ODCLFIELD {
base.Fatalf("funcarg %v", n.Op())
}
var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext
type funcStackEnt struct {
- curfn *ir.Node
+ curfn ir.Node
dclcontext ir.Class
}
}
}
-func structfield(n *ir.Node) *types.Field {
+func structfield(n ir.Node) *types.Field {
lno := base.Pos
base.Pos = n.Pos()
// convert a parsed id/type list into
// a type for struct/interface/arglist
-func tostruct(l []*ir.Node) *types.Type {
+func tostruct(l []ir.Node) *types.Type {
t := types.New(types.TSTRUCT)
fields := make([]*types.Field, len(l))
return t
}
-func tofunargs(l []*ir.Node, funarg types.Funarg) *types.Type {
+func tofunargs(l []ir.Node, funarg types.Funarg) *types.Type {
t := types.New(types.TSTRUCT)
t.StructType().Funarg = funarg
return t
}
-func interfacefield(n *ir.Node) *types.Field {
+func interfacefield(n ir.Node) *types.Field {
lno := base.Pos
base.Pos = n.Pos()
return f
}
-func tointerface(l []*ir.Node) *types.Type {
+func tointerface(l []ir.Node) *types.Type {
if len(l) == 0 {
return types.Types[types.TINTER]
}
return t
}
-func fakeRecv() *ir.Node {
+func fakeRecv() ir.Node {
return anonfield(types.FakeRecvType())
}
}
// turn a parsed function declaration into a type
-func functype(this *ir.Node, in, out []*ir.Node) *types.Type {
+func functype(this ir.Node, in, out []ir.Node) *types.Type {
t := types.New(types.TFUNC)
- var rcvr []*ir.Node
+ var rcvr []ir.Node
if this != nil {
- rcvr = []*ir.Node{this}
+ rcvr = []ir.Node{this}
}
t.FuncType().Receiver = tofunargs(rcvr, types.FunargRcvr)
t.FuncType().Params = tofunargs(in, types.FunargParams)
// - msym is the method symbol
// - t is function type (with receiver)
// Returns a pointer to the existing or added Field; or nil if there's an error.
-func addmethod(n *ir.Node, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
+func addmethod(n ir.Node, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
if msym == nil {
base.Fatalf("no method symbol")
}
}
// setNodeNameFunc marks a node as a function.
-func setNodeNameFunc(n *ir.Node) {
+func setNodeNameFunc(n ir.Node) {
if n.Op() != ir.ONAME || n.Class() != ir.Pxxx {
base.Fatalf("expected ONAME/Pxxx node, got %v", n)
}
n.Sym().SetFunc(true)
}
-func dclfunc(sym *types.Sym, tfn *ir.Node) *ir.Node {
+func dclfunc(sym *types.Sym, tfn ir.Node) ir.Node {
if tfn.Op() != ir.OTFUNC {
base.Fatalf("expected OTFUNC node, got %v", tfn)
}
// extraCalls contains extra function calls that may not be
// visible during later analysis. It maps from the ODCLFUNC of
// the caller to a list of callees.
- extraCalls map[*ir.Node][]nowritebarrierrecCall
+ extraCalls map[ir.Node][]nowritebarrierrecCall
// curfn is the current function during AST walks.
- curfn *ir.Node
+ curfn ir.Node
}
type nowritebarrierrecCall struct {
- target *ir.Node // ODCLFUNC of caller or callee
+ target ir.Node // ODCLFUNC of caller or callee
lineno src.XPos // line of call
}
// must be called before transformclosure and walk.
func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
c := &nowritebarrierrecChecker{
- extraCalls: make(map[*ir.Node][]nowritebarrierrecCall),
+ extraCalls: make(map[ir.Node][]nowritebarrierrecCall),
}
// Find all systemstack calls and record their targets. In
return c
}
-func (c *nowritebarrierrecChecker) findExtraCalls(n *ir.Node) bool {
+func (c *nowritebarrierrecChecker) findExtraCalls(n ir.Node) bool {
if n.Op() != ir.OCALLFUNC {
return true
}
return true
}
- var callee *ir.Node
+ var callee ir.Node
arg := n.List().First()
switch arg.Op() {
case ir.ONAME:
// because that's all we know after we start SSA.
//
// This can be called concurrently for different from Nodes.
-func (c *nowritebarrierrecChecker) recordCall(from *ir.Node, to *obj.LSym, pos src.XPos) {
+func (c *nowritebarrierrecChecker) recordCall(from ir.Node, to *obj.LSym, pos src.XPos) {
if from.Op() != ir.ODCLFUNC {
base.Fatalf("expected ODCLFUNC, got %v", from)
}
// capture all calls created by lowering, but this means we
// only get to see the obj.LSyms of calls. symToFunc lets us
// get back to the ODCLFUNCs.
- symToFunc := make(map[*obj.LSym]*ir.Node)
+ symToFunc := make(map[*obj.LSym]ir.Node)
// funcs records the back-edges of the BFS call graph walk. It
// maps from the ODCLFUNC of each function that must not have
// write barriers to the call that inhibits them. Functions
// that are directly marked go:nowritebarrierrec are in this
// map with a zero-valued nowritebarrierrecCall. This also
// acts as the set of marks for the BFS of the call graph.
- funcs := make(map[*ir.Node]nowritebarrierrecCall)
+ funcs := make(map[ir.Node]nowritebarrierrecCall)
// q is the queue of ODCLFUNC Nodes to visit in BFS order.
var q ir.NodeQueue
// Perform a BFS of the call graph from all
// go:nowritebarrierrec functions.
- enqueue := func(src, target *ir.Node, pos src.XPos) {
+ enqueue := func(src, target ir.Node, pos src.XPos) {
if target.Func().Pragma&ir.Yeswritebarrierrec != 0 {
// Don't flow into this function.
return
"strings"
)
-var embedlist []*ir.Node
+var embedlist []ir.Node
const (
embedUnknown = iota
var numLocalEmbed int
-func varEmbed(p *noder, names []*ir.Node, typ *ir.Node, exprs []*ir.Node, embeds []PragmaEmbed) (newExprs []*ir.Node) {
+func varEmbed(p *noder, names []ir.Node, typ ir.Node, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) {
haveEmbed := false
for _, decl := range p.file.DeclList {
imp, ok := decl.(*syntax.ImportDecl)
v.Name().Param.Ntype = typ
v.SetClass(ir.PEXTERN)
externdcl = append(externdcl, v)
- exprs = []*ir.Node{v}
+ exprs = []ir.Node{v}
}
v.Name().Param.SetEmbedFiles(list)
// The match is approximate because we haven't done scope resolution yet and
// can't tell whether "string" and "byte" really mean "string" and "byte".
// The result must be confirmed later, after type checking, using embedKind.
-func embedKindApprox(typ *ir.Node) int {
+func embedKindApprox(typ ir.Node) int {
if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
// initEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
-func initEmbed(v *ir.Node) {
+func initEmbed(v ir.Node) {
files := v.Name().Param.EmbedFiles()
switch kind := embedKind(v.Type()); kind {
case embedUnknown:
type Escape struct {
allLocs []*EscLocation
- curfn *ir.Node
+ curfn ir.Node
// loopDepth counts the current loop nesting depth within
// curfn. It increments within each "for" loop and at each
// An EscLocation represents an abstract location that stores a Go
// variable.
type EscLocation struct {
- n *ir.Node // represented variable or expression, if any
- curfn *ir.Node // enclosing function
+ n ir.Node // represented variable or expression, if any
+ curfn ir.Node // enclosing function
edges []EscEdge // incoming edges
loopDepth int // loopDepth at declaration
}
// escFmt is called from node printing to print information about escape analysis results.
-func escFmt(n *ir.Node, short bool) string {
+func escFmt(n ir.Node, short bool) string {
text := ""
switch n.Esc() {
case EscUnknown:
// escapeFuncs performs escape analysis on a minimal batch of
// functions.
-func escapeFuncs(fns []*ir.Node, recursive bool) {
+func escapeFuncs(fns []ir.Node, recursive bool) {
for _, fn := range fns {
if fn.Op() != ir.ODCLFUNC {
base.Fatalf("unexpected node: %v", fn)
e.finish(fns)
}
-func (e *Escape) initFunc(fn *ir.Node) {
+func (e *Escape) initFunc(fn ir.Node) {
if fn.Op() != ir.ODCLFUNC || fn.Esc() != EscFuncUnknown {
base.Fatalf("unexpected node: %v", fn)
}
}
}
-func (e *Escape) walkFunc(fn *ir.Node) {
+func (e *Escape) walkFunc(fn ir.Node) {
fn.SetEsc(EscFuncStarted)
// Identify labels that mark the head of an unstructured loop.
- ir.InspectList(fn.Body(), func(n *ir.Node) bool {
+ ir.InspectList(fn.Body(), func(n ir.Node) bool {
switch n.Op() {
case ir.OLABEL:
n.Sym().Label = nonlooping
// }
// stmt evaluates a single Go statement.
-func (e *Escape) stmt(n *ir.Node) {
+func (e *Escape) stmt(n ir.Node) {
if n == nil {
return
}
// expr models evaluating an expression n and flowing the result into
// hole k.
-func (e *Escape) expr(k EscHole, n *ir.Node) {
+func (e *Escape) expr(k EscHole, n ir.Node) {
if n == nil {
return
}
e.exprSkipInit(k, n)
}
-func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) {
+func (e *Escape) exprSkipInit(k EscHole, n ir.Node) {
if n == nil {
return
}
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
// for conversions from an unsafe.Pointer.
-func (e *Escape) unsafeValue(k EscHole, n *ir.Node) {
+func (e *Escape) unsafeValue(k EscHole, n ir.Node) {
if n.Type().Etype != types.TUINTPTR {
base.Fatalf("unexpected type %v for %v", n.Type(), n)
}
// discard evaluates an expression n for side-effects, but discards
// its value.
-func (e *Escape) discard(n *ir.Node) {
+func (e *Escape) discard(n ir.Node) {
e.expr(e.discardHole(), n)
}
// addr evaluates an addressable expression n and returns an EscHole
// that represents storing into the represented location.
-func (e *Escape) addr(n *ir.Node) EscHole {
+func (e *Escape) addr(n ir.Node) EscHole {
if n == nil || ir.IsBlank(n) {
// Can happen at least in OSELRECV.
// TODO(mdempsky): Anywhere else?
}
// assign evaluates the assignment dst = src.
-func (e *Escape) assign(dst, src *ir.Node, why string, where *ir.Node) {
+func (e *Escape) assign(dst, src ir.Node, why string, where ir.Node) {
// Filter out some no-op assignments for escape analysis.
ignore := dst != nil && src != nil && isSelfAssign(dst, src)
if ignore && base.Flag.LowerM != 0 {
}
}
-func (e *Escape) assignHeap(src *ir.Node, why string, where *ir.Node) {
+func (e *Escape) assignHeap(src ir.Node, why string, where ir.Node) {
e.expr(e.heapHole().note(where, why), src)
}
// call evaluates a call expression, including builtin calls. ks
// should contain the holes representing where the function callee's
// results flow; where is the OGO/ODEFER context of the call, if any.
-func (e *Escape) call(ks []EscHole, call, where *ir.Node) {
+func (e *Escape) call(ks []EscHole, call, where ir.Node) {
topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1
if topLevelDefer {
// force stack allocation of defer record, unless
where.SetEsc(EscNever)
}
- argument := func(k EscHole, arg *ir.Node) {
+ argument := func(k EscHole, arg ir.Node) {
if topLevelDefer {
		// Top-level defer arguments don't escape to
// heap, but they do need to last until end of
fixVariadicCall(call)
// Pick out the function callee, if statically known.
- var fn *ir.Node
+ var fn ir.Node
switch call.Op() {
case ir.OCALLFUNC:
switch v := staticValue(call.Left()); {
// ks should contain the holes representing where the function
// callee's results flow. fn is the statically-known callee function,
// if any.
-func (e *Escape) tagHole(ks []EscHole, fn *ir.Node, param *types.Field) EscHole {
+func (e *Escape) tagHole(ks []EscHole, fn ir.Node, param *types.Field) EscHole {
// If this is a dynamic call, we can't rely on param.Note.
if fn == nil {
return e.heapHole()
// fn has not yet been analyzed, so its parameters and results
// should be incorporated directly into the flow graph instead of
// relying on its escape analysis tagging.
-func (e *Escape) inMutualBatch(fn *ir.Node) bool {
+func (e *Escape) inMutualBatch(fn ir.Node) bool {
if fn.Name().Defn != nil && fn.Name().Defn.Esc() < EscFuncTagged {
if fn.Name().Defn.Esc() == EscFuncUnknown {
base.Fatalf("graph inconsistency")
type EscNote struct {
next *EscNote
- where *ir.Node
+ where ir.Node
why string
}
-func (k EscHole) note(where *ir.Node, why string) EscHole {
+func (k EscHole) note(where ir.Node, why string) EscHole {
if where == nil || why == "" {
base.Fatalf("note: missing where/why")
}
return k
}
-func (k EscHole) deref(where *ir.Node, why string) EscHole { return k.shift(1).note(where, why) }
-func (k EscHole) addr(where *ir.Node, why string) EscHole { return k.shift(-1).note(where, why) }
+func (k EscHole) deref(where ir.Node, why string) EscHole { return k.shift(1).note(where, why) }
+func (k EscHole) addr(where ir.Node, why string) EscHole { return k.shift(-1).note(where, why) }
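The shift convention is easy to lose track of, so here is a minimal standalone
sketch of the bookkeeping (a toy, not the compiler's EscHole):

	// derefs counts indirections between source and destination:
	// +1 per dereference, -1 per address-of.
	type hole struct{ derefs int }

	func (k hole) shift(delta int) hole { k.derefs += delta; return k }
	func (k hole) deref() hole          { return k.shift(1) }  // value loaded via *p
	func (k hole) addr() hole           { return k.shift(-1) } // value stored as &p

Flowing &x into a hole with derefs 0 therefore yields derefs -1: the destination
ends up holding x's address.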
-func (k EscHole) dotType(t *types.Type, where *ir.Node, why string) EscHole {
+func (k EscHole) dotType(t *types.Type, where ir.Node, why string) EscHole {
if !t.IsInterface() && !isdirectiface(t) {
k = k.shift(1)
}
return loc.asHole()
}
-func (e *Escape) dcl(n *ir.Node) EscHole {
+func (e *Escape) dcl(n ir.Node) EscHole {
loc := e.oldLoc(n)
loc.loopDepth = e.loopDepth
return loc.asHole()
// spill allocates a new location associated with expression n, flows
// its address to k, and returns a hole that flows values to it. It's
// intended for use with most expressions that allocate storage.
-func (e *Escape) spill(k EscHole, n *ir.Node) EscHole {
+func (e *Escape) spill(k EscHole, n ir.Node) EscHole {
loc := e.newLoc(n, true)
e.flow(k.addr(n, "spill"), loc)
return loc.asHole()
// canonicalNode returns the canonical Node that n logically
// represents.
-func canonicalNode(n *ir.Node) *ir.Node {
+func canonicalNode(n ir.Node) ir.Node {
if n != nil && n.Op() == ir.ONAME && n.Name().IsClosureVar() {
n = n.Name().Defn
if n.Name().IsClosureVar() {
return n
}
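The closure-variable case being canonicalized looks like this (illustrative
source, not compiler code):

	func counter() func() int {
		n := 0 // the canonical ONAME
		return func() int {
			n++      // inside the closure, this n is a separate ONAME
			return n // whose Name().Defn points back at the outer n
		}
	}

Both references must share one EscLocation, so the analysis always maps the
inner node to the outer definition first.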
-func (e *Escape) newLoc(n *ir.Node, transient bool) *EscLocation {
+func (e *Escape) newLoc(n ir.Node, transient bool) *EscLocation {
if e.curfn == nil {
base.Fatalf("e.curfn isn't set")
}
return loc
}
-func (e *Escape) oldLoc(n *ir.Node) *EscLocation {
+func (e *Escape) oldLoc(n ir.Node) *EscLocation {
n = canonicalNode(n)
return n.Opt().(*EscLocation)
}
}
// containsClosure reports whether c is a closure contained within f.
-func containsClosure(f, c *ir.Node) bool {
+func containsClosure(f, c ir.Node) bool {
if f.Op() != ir.ODCLFUNC || c.Op() != ir.ODCLFUNC {
base.Fatalf("bad containsClosure: %v, %v", f, c)
}
l.paramEsc.AddHeap(derefs)
}
-func (e *Escape) finish(fns []*ir.Node) {
+func (e *Escape) finish(fns []ir.Node) {
// Record parameter tags for package export data.
for _, fn := range fns {
fn.SetEsc(EscFuncTagged)
return l
}
-func escapes(all []*ir.Node) {
+func escapes(all []ir.Node) {
visitBottomUp(all, escapeFuncs)
}
)
// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
-func funcSym(fn *ir.Node) *types.Sym {
+func funcSym(fn ir.Node) *types.Sym {
if fn == nil || fn.Func().Nname == nil {
return nil
}
nonlooping = ir.Nod(ir.OXXX, nil, nil)
)
-func isSliceSelfAssign(dst, src *ir.Node) bool {
+func isSliceSelfAssign(dst, src ir.Node) bool {
// Detect the following special case.
//
// func (b *Buffer) Foo() {
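The hunk cuts the example short; the special case being detected is a method
that re-slices one of its own fields in place, along these lines (illustrative;
Buffer and buf are hypothetical):

	func (b *Buffer) Foo() {
		n, m := 0, len(b.buf)
		b.buf = b.buf[n:m] // re-slices in place; the base object cannot change
	}

Since the slice expression only narrows the existing backing array, the
assignment can be ignored for escape purposes.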
// isSelfAssign reports whether assignment from src to dst can
// be ignored by the escape analysis as it's effectively a self-assignment.
-func isSelfAssign(dst, src *ir.Node) bool {
+func isSelfAssign(dst, src ir.Node) bool {
if isSliceSelfAssign(dst, src) {
return true
}
// mayAffectMemory reports whether evaluation of n may affect the program's
// memory state. If the expression can't affect memory state, then it can be
// safely ignored by the escape analysis.
-func mayAffectMemory(n *ir.Node) bool {
+func mayAffectMemory(n ir.Node) bool {
// We may want to use a list of "memory safe" ops instead of generally
// "side-effect free", which would include all calls and other ops that can
// allocate or change global state. For now, it's safer to start with the latter.
// heapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it doesn't need to be.
-func heapAllocReason(n *ir.Node) string {
+func heapAllocReason(n ir.Node) string {
if n.Type() == nil {
return ""
}
// by "increasing" the "value" of n.Esc to EscHeap.
// Storage is allocated as necessary to allow the address
// to be taken.
-func addrescapes(n *ir.Node) {
+func addrescapes(n ir.Node) {
switch n.Op() {
default:
// Unexpected Op, probably due to a previous type error. Ignore.
}
// moveToHeap records the parameter or local variable n as moved to the heap.
-func moveToHeap(n *ir.Node) {
+func moveToHeap(n ir.Node) {
if base.Flag.LowerR != 0 {
ir.Dump("MOVE", n)
}
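A variable typically reaches moveToHeap like this (illustrative):

	// x's address escapes via the return value; addrescapes fires
	// and moveToHeap rewrites x as a heap-allocated PAUTOHEAP variable.
	func leak() *int {
		x := 42
		return &x
	}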
// marked go:uintptrescapes.
const uintptrEscapesTag = "uintptr-escapes"
-func (e *Escape) paramTag(fn *ir.Node, narg int, f *types.Field) string {
+func (e *Escape) paramTag(fn ir.Node, narg int, f *types.Field) string {
name := func() string {
if f.Sym != nil {
return f.Sym.Name
}
}
-var asmlist []*ir.Node
+var asmlist []ir.Node
// exportsym marks n for export (or reexport).
-func exportsym(n *ir.Node) {
+func exportsym(n ir.Node) {
if n.Sym().OnExportList() {
return
}
return s == "init"
}
-func autoexport(n *ir.Node, ctxt ir.Class) {
+func autoexport(n ir.Node, ctxt ir.Class) {
if n.Sym().Pkg != ir.LocalPkg {
return
}
}
}
-func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) *ir.Node {
+func importsym(ipkg *types.Pkg, s *types.Sym, op ir.Op) ir.Node {
n := ir.AsNode(s.PkgDef())
if n == nil {
// iimport should have created a stub ONONAME
// importobj declares symbol s as an imported object representable by op.
// ipkg is the package being imported
-func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Node {
+func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) ir.Node {
n := importsym(ipkg, s, op)
if n.Op() != ir.ONONAME {
if n.Op() == op && (n.Class() != ctxt || !types.Identical(n.Type(), t)) {
// isParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
-func isParamStackCopy(n *ir.Node) bool {
+func isParamStackCopy(n ir.Node) bool {
return n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Name().Param.Heapaddr != nil
}
// isParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
-func isParamHeapCopy(n *ir.Node) bool {
+func isParamHeapCopy(n ir.Node) bool {
return n.Op() == ir.ONAME && n.Class() == ir.PAUTOHEAP && n.Name().Param.Stackcopy != nil
}
}
// make a new Node off the books
-func tempAt(pos src.XPos, curfn *ir.Node, t *types.Type) *ir.Node {
+func tempAt(pos src.XPos, curfn ir.Node, t *types.Type) ir.Node {
if curfn == nil {
base.Fatalf("no curfn for tempAt")
}
return n.Orig()
}
-func temp(t *types.Type) *ir.Node {
+func temp(t *types.Type) ir.Node {
return tempAt(base.Pos, Curfn, t)
}
iscmp [ir.OEND]bool
)
-var xtop []*ir.Node
+var xtop []ir.Node
-var exportlist []*ir.Node
+var exportlist []ir.Node
-var importlist []*ir.Node // imported functions and methods with inlinable bodies
+var importlist []ir.Node // imported functions and methods with inlinable bodies
var (
funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
var dclcontext ir.Class // PEXTERN/PAUTO
-var Curfn *ir.Node
+var Curfn ir.Node
var Widthptr int
// Whether we are tracking lexical scopes for DWARF.
var trackScopes bool
-var nodfp *ir.Node
+var nodfp ir.Node
var autogeneratedPos src.XPos
var (
staticuint64s,
- zerobase *ir.Node
+ zerobase ir.Node
assertE2I,
assertE2I2,
next *obj.Prog // next Prog
pc int64 // virtual PC; count of Progs
pos src.XPos // position to use for new Progs
- curfn *ir.Node // fn these Progs are for
+ curfn ir.Node // fn these Progs are for
progcache []obj.Prog // local progcache
cacheidx int // first free element of progcache
// newProgs returns a new Progs for fn.
// worker indicates which of the backend workers will use the Progs.
-func newProgs(fn *ir.Node, worker int) *Progs {
+func newProgs(fn ir.Node, worker int) *Progs {
pp := new(Progs)
if base.Ctxt.CanReuseProgs() {
sz := len(sharedProgArray) / base.Flag.LowerC
return q
}
-func (pp *Progs) settext(fn *ir.Node) {
+func (pp *Progs) settext(fn ir.Node) {
if pp.Text != nil {
base.Fatalf("Progs.settext called twice")
}
base.Ctxt.InitTextSym(f.LSym, flag)
}
-func ggloblnod(nam *ir.Node) {
+func ggloblnod(nam ir.Node) {
s := nam.Sym().Linksym()
s.Gotype = ngotype(nam).Linksym()
flags := 0
p := iexporter{
allPkgs: map[*types.Pkg]bool{},
stringIndex: map[string]uint64{},
- declIndex: map[*ir.Node]uint64{},
- inlineIndex: map[*ir.Node]uint64{},
+ declIndex: map[ir.Node]uint64{},
+ inlineIndex: map[ir.Node]uint64{},
typIndex: map[*types.Type]uint64{},
}
// we're writing out the main index, which is also read by
// non-compiler tools and includes a complete package description
// (i.e., name and height).
-func (w *exportWriter) writeIndex(index map[*ir.Node]uint64, mainIndex bool) {
+func (w *exportWriter) writeIndex(index map[ir.Node]uint64, mainIndex bool) {
// Build a map from packages to objects from that package.
- pkgObjs := map[*types.Pkg][]*ir.Node{}
+ pkgObjs := map[*types.Pkg][]ir.Node{}
// For the main index, make sure to include every package that
// we reference, even if we're not exporting (or reexporting)
stringIndex map[string]uint64
data0 intWriter
- declIndex map[*ir.Node]uint64
- inlineIndex map[*ir.Node]uint64
+ declIndex map[ir.Node]uint64
+ inlineIndex map[ir.Node]uint64
typIndex map[*types.Type]uint64
}
}
// pushDecl adds n to the declaration work queue, if not already present.
-func (p *iexporter) pushDecl(n *ir.Node) {
+func (p *iexporter) pushDecl(n ir.Node) {
if n.Sym() == nil || ir.AsNode(n.Sym().Def) != n && n.Op() != ir.OTYPE {
base.Fatalf("weird Sym: %v, %v", n, n.Sym())
}
prevColumn int64
}
-func (p *iexporter) doDecl(n *ir.Node) {
+func (p *iexporter) doDecl(n ir.Node) {
w := p.newWriter()
w.setPkg(n.Sym().Pkg, false)
w.data.WriteByte(tag)
}
-func (p *iexporter) doInline(f *ir.Node) {
+func (p *iexporter) doInline(f ir.Node) {
w := p.newWriter()
w.setPkg(fnpkg(f), false)
w.string(pkg.Path)
}
-func (w *exportWriter) qualifiedIdent(n *ir.Node) {
+func (w *exportWriter) qualifiedIdent(n ir.Node) {
// Ensure any referenced declarations are written out too.
w.p.pushDecl(n)
// Compiler-specific extensions.
-func (w *exportWriter) varExt(n *ir.Node) {
+func (w *exportWriter) varExt(n ir.Node) {
w.linkname(n.Sym())
w.symIdx(n.Sym())
}
-func (w *exportWriter) funcExt(n *ir.Node) {
+func (w *exportWriter) funcExt(n ir.Node) {
w.linkname(n.Sym())
w.symIdx(n.Sym())
w.op(ir.OEND)
}
-func (w *exportWriter) node(n *ir.Node) {
+func (w *exportWriter) node(n ir.Node) {
if ir.OpPrec[n.Op()] < 0 {
w.stmt(n)
} else {
// Caution: stmt will emit more than one node for statement nodes n that have a non-empty
// n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.).
-func (w *exportWriter) stmt(n *ir.Node) {
+func (w *exportWriter) stmt(n ir.Node) {
if n.Init().Len() > 0 && !ir.StmtWithInit(n.Op()) {
// can't use stmtList here since we don't want the final OEND
for _, n := range n.Init().Slice() {
w.op(ir.OAS2)
w.pos(n.Pos())
w.exprList(n.List())
- w.exprList(ir.AsNodes([]*ir.Node{n.Right()}))
+ w.exprList(ir.AsNodes([]ir.Node{n.Right()}))
case ir.ORETURN:
w.op(ir.ORETURN)
}
}
-func (w *exportWriter) caseList(sw *ir.Node) {
+func (w *exportWriter) caseList(sw ir.Node) {
namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil
cases := sw.List().Slice()
w.op(ir.OEND)
}
-func (w *exportWriter) expr(n *ir.Node) {
+func (w *exportWriter) expr(n ir.Node) {
// from nodefmt (fmt.go)
//
// nodefmt reverts nodes back to their original - we don't need to do
w.uint64(uint64(op))
}
-func (w *exportWriter) exprsOrNil(a, b *ir.Node) {
+func (w *exportWriter) exprsOrNil(a, b ir.Node) {
ab := 0
if a != nil {
ab |= 1
}
}
-func (w *exportWriter) localName(n *ir.Node) {
+func (w *exportWriter) localName(n ir.Node) {
// Escape analysis happens after inline bodies are saved, but
// we're using the same ONAME nodes, so we might still see
// PAUTOHEAP here.
inlineImporter = map[*types.Sym]iimporterAndOffset{}
)
-func expandDecl(n *ir.Node) {
+func expandDecl(n ir.Node) {
if n.Op() != ir.ONONAME {
return
}
r.doDecl(n)
}
-func expandInline(fn *ir.Node) {
+func expandInline(fn ir.Node) {
if fn.Func().Inl.Body != nil {
return
}
r.doInline(fn)
}
-func importReaderFor(n *ir.Node, importers map[*types.Sym]iimporterAndOffset) *importReader {
+func importReaderFor(n ir.Node, importers map[*types.Sym]iimporterAndOffset) *importReader {
x, ok := importers[n.Sym()]
if !ok {
return nil
r.currPkg = r.pkg()
}
-func (r *importReader) doDecl(n *ir.Node) {
+func (r *importReader) doDecl(n ir.Node) {
if n.Op() != ir.ONONAME {
base.Fatalf("doDecl: unexpected Op for %v: %v", n.Sym(), n.Op())
}
// Compiler-specific extensions.
-func (r *importReader) varExt(n *ir.Node) {
+func (r *importReader) varExt(n ir.Node) {
r.linkname(n.Sym())
r.symIdx(n.Sym())
}
-func (r *importReader) funcExt(n *ir.Node) {
+func (r *importReader) funcExt(n ir.Node) {
r.linkname(n.Sym())
r.symIdx(n.Sym())
// so we can use index to reference the symbol.
var typeSymIdx = make(map[*types.Type][2]int64)
-func (r *importReader) doInline(n *ir.Node) {
+func (r *importReader) doInline(n ir.Node) {
if len(n.Func().Inl.Body) != 0 {
base.Fatalf("%v already has inline body", n)
}
// (not doing so can cause significant performance
// degradation due to unnecessary calls to empty
// functions).
- body = []*ir.Node{}
+ body = []ir.Node{}
}
n.Func().Inl.Body = body
// unrefined nodes (since this is what the importer uses). The respective case
// entries are unreachable in the importer.
-func (r *importReader) stmtList() []*ir.Node {
- var list []*ir.Node
+func (r *importReader) stmtList() []ir.Node {
+ var list []ir.Node
for {
n := r.node()
if n == nil {
return list
}
-func (r *importReader) caseList(sw *ir.Node) []*ir.Node {
+func (r *importReader) caseList(sw ir.Node) []ir.Node {
namedTypeSwitch := sw.Op() == ir.OSWITCH && sw.Left() != nil && sw.Left().Op() == ir.OTYPESW && sw.Left().Left() != nil
- cases := make([]*ir.Node, r.uint64())
+ cases := make([]ir.Node, r.uint64())
for i := range cases {
cas := ir.NodAt(r.pos(), ir.OCASE, nil, nil)
cas.PtrList().Set(r.stmtList())
return cases
}
-func (r *importReader) exprList() []*ir.Node {
- var list []*ir.Node
+func (r *importReader) exprList() []ir.Node {
+ var list []ir.Node
for {
n := r.expr()
if n == nil {
return list
}
-func (r *importReader) expr() *ir.Node {
+func (r *importReader) expr() ir.Node {
n := r.node()
if n != nil && n.Op() == ir.OBLOCK {
base.Fatalf("unexpected block node: %v", n)
}
// TODO(gri) split into expr and stmt
-func (r *importReader) node() *ir.Node {
+func (r *importReader) node() ir.Node {
switch op := r.op(); op {
// expressions
// case OPAREN:
pos := r.pos()
typ := r.typ()
- var n *ir.Node
+ var n ir.Node
if typ.HasNil() {
n = nodnil()
} else {
case ir.OSLICE, ir.OSLICE3:
n := ir.NodAt(r.pos(), op, r.expr(), nil)
low, high := r.exprsOrNil()
- var max *ir.Node
+ var max ir.Node
if n.Op().IsSlice3() {
max = r.expr()
}
pos := r.pos()
lhs := npos(pos, dclname(r.ident()))
typ := typenod(r.typ())
- return npos(pos, liststmt(variter([]*ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation
+ return npos(pos, liststmt(variter([]ir.Node{lhs}, typ, nil))) // TODO(gri) avoid list creation
// case ODCLFIELD:
// unimplemented
return ir.Op(r.uint64())
}
-func (r *importReader) elemList() []*ir.Node {
+func (r *importReader) elemList() []ir.Node {
c := r.uint64()
- list := make([]*ir.Node, c)
+ list := make([]ir.Node, c)
for i := range list {
s := r.ident()
list[i] = nodSym(ir.OSTRUCTKEY, r.expr(), s)
return list
}
-func (r *importReader) exprsOrNil() (a, b *ir.Node) {
+func (r *importReader) exprsOrNil() (a, b ir.Node) {
ab := r.uint64()
if ab&1 != 0 {
a = r.expr()
// 1) Initialize all of the packages the current package depends on.
// 2) Initialize all the variables that have initializers.
// 3) Run any init functions.
-func fninit(n []*ir.Node) {
+func fninit(n []ir.Node) {
nf := initOrder(n)
var deps []*obj.LSym // initTask records for packages the current package depends on
type InitOrder struct {
// blocking maps initialization assignments to the assignments
// that depend on it.
- blocking map[*ir.Node][]*ir.Node
+ blocking map[ir.Node][]ir.Node
// ready is the queue of Pending initialization assignments
// that are ready for initialization.
// package-level declarations (in declaration order) and outputs the
// corresponding list of statements to include in the init() function
// body.
-func initOrder(l []*ir.Node) []*ir.Node {
+func initOrder(l []ir.Node) []ir.Node {
s := InitSchedule{
- initplans: make(map[*ir.Node]*InitPlan),
- inittemps: make(map[*ir.Node]*ir.Node),
+ initplans: make(map[ir.Node]*InitPlan),
+ inittemps: make(map[ir.Node]ir.Node),
}
o := InitOrder{
- blocking: make(map[*ir.Node][]*ir.Node),
+ blocking: make(map[ir.Node][]ir.Node),
}
	// Process all package-level assignments in declaration order.
// first.
base.ExitIfErrors()
- findInitLoopAndExit(firstLHS(n), new([]*ir.Node))
+ findInitLoopAndExit(firstLHS(n), new([]ir.Node))
base.Fatalf("initialization unfinished, but failed to identify loop")
}
}
return s.out
}
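The loops this machinery rejects are ordinary package-level cycles (an
illustrative program, not compiler code):

	var a = b + 1
	var b = c + 1
	var c = a + 1 // initialization loop: a -> b -> c -> a

When no assignment is ready but work remains, findInitLoopAndExit walks the
dependency graph to report a cycle like this one.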
-func (o *InitOrder) processAssign(n *ir.Node) {
+func (o *InitOrder) processAssign(n ir.Node) {
if n.Initorder() != InitNotStarted || n.Offset() != types.BADWIDTH {
base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
}
// flushReady repeatedly applies initialize to the earliest (in
// declaration order) assignment ready for initialization and updates
// the inverse dependency ("blocking") graph.
-func (o *InitOrder) flushReady(initialize func(*ir.Node)) {
+func (o *InitOrder) flushReady(initialize func(ir.Node)) {
for o.ready.Len() != 0 {
- n := heap.Pop(&o.ready).(*ir.Node)
+ n := heap.Pop(&o.ready).(ir.Node)
if n.Initorder() != InitPending || n.Offset() != 0 {
base.Fatalf("unexpected state: %v, %v, %v", n, n.Initorder(), n.Offset())
}
// path points to a slice used for tracking the sequence of
// variables/functions visited. Using a pointer to a slice allows the
// slice capacity to grow and limit reallocations.
-func findInitLoopAndExit(n *ir.Node, path *[]*ir.Node) {
+func findInitLoopAndExit(n ir.Node, path *[]ir.Node) {
// We implement a simple DFS loop-finding algorithm. This
// could be faster, but initialization cycles are rare.
// There might be multiple loops involving n; by sorting
// references, we deterministically pick the one reported.
- refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj *ir.Node) bool {
+ refers := collectDeps(n.Name().Defn, false).Sorted(func(ni, nj ir.Node) bool {
return ni.Pos().Before(nj.Pos())
})
// reportInitLoopAndExit reports an initialization loop as an error
// and exits. However, if l is not actually an initialization loop, it
// simply returns instead.
-func reportInitLoopAndExit(l []*ir.Node) {
+func reportInitLoopAndExit(l []ir.Node) {
// Rotate loop so that the earliest variable declaration is at
// the start.
i := -1
// variables that declaration n depends on. If transitive is true,
// then it also includes the transitive dependencies of any depended
// upon functions (but not variables).
-func collectDeps(n *ir.Node, transitive bool) ir.NodeSet {
+func collectDeps(n ir.Node, transitive bool) ir.NodeSet {
d := initDeps{transitive: transitive}
switch n.Op() {
case ir.OAS:
seen ir.NodeSet
}
-func (d *initDeps) inspect(n *ir.Node) { ir.Inspect(n, d.visit) }
+func (d *initDeps) inspect(n ir.Node) { ir.Inspect(n, d.visit) }
func (d *initDeps) inspectList(l ir.Nodes) { ir.InspectList(l, d.visit) }
// visit calls foundDep on any package-level functions or variables
// referenced by n, if any.
-func (d *initDeps) visit(n *ir.Node) bool {
+func (d *initDeps) visit(n ir.Node) bool {
switch n.Op() {
case ir.OMETHEXPR:
d.foundDep(methodExprName(n))
// foundDep records that we've found a dependency on n by adding it to
// seen.
-func (d *initDeps) foundDep(n *ir.Node) {
+func (d *initDeps) foundDep(n ir.Node) {
// Can happen with method expressions involving interface
// types; e.g., fixedbugs/issue4495.go.
if n == nil {
// an OAS node's Pos may not be unique. For example, given the
// declaration "var a, b = f(), g()", "a" must be ordered before "b",
// but both OAS nodes use the "=" token's position as their Pos.
-type declOrder []*ir.Node
+type declOrder []ir.Node
func (s declOrder) Len() int { return len(s) }
func (s declOrder) Less(i, j int) bool {
}
func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(*ir.Node)) }
+func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(ir.Node)) }
func (s *declOrder) Pop() interface{} {
n := (*s)[len(*s)-1]
*s = (*s)[:len(*s)-1]
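The Push/Pop pair only makes sense driven through container/heap; here is a
minimal self-contained analogue (a toy type, not the compiler's declOrder over
ir.Node):

	package main

	import (
		"container/heap"
		"fmt"
	)

	// decl is a toy stand-in for a declaration: smaller pos = earlier.
	type decl struct {
		name string
		pos  int
	}

	type order []decl

	func (s order) Len() int            { return len(s) }
	func (s order) Less(i, j int) bool  { return s[i].pos < s[j].pos }
	func (s order) Swap(i, j int)       { s[i], s[j] = s[j], s[i] }
	func (s *order) Push(x interface{}) { *s = append(*s, x.(decl)) }
	func (s *order) Pop() interface{} {
		n := (*s)[len(*s)-1]
		*s = (*s)[:len(*s)-1]
		return n
	}

	func main() {
		var ready order
		heap.Push(&ready, decl{"b", 2})
		heap.Push(&ready, decl{"a", 1})
		fmt.Println(heap.Pop(&ready).(decl).name) // a: earliest declaration first
	}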
// firstLHS returns the first expression on the left-hand side of
// assignment n.
-func firstLHS(n *ir.Node) *ir.Node {
+func firstLHS(n ir.Node) ir.Node {
switch n.Op() {
case ir.OAS:
return n.Left()
// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
// the ->sym can be re-used in the local package, so peel it off the receiver's type.
-func fnpkg(fn *ir.Node) *types.Pkg {
+func fnpkg(fn ir.Node) *types.Pkg {
if ir.IsMethod(fn) {
// method
rcvr := fn.Type().Recv().Type
// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// because they're a copy of an already checked body.
-func typecheckinl(fn *ir.Node) {
+func typecheckinl(fn ir.Node) {
lno := setlineno(fn)
expandInline(fn)
// Caninl determines whether fn is inlineable.
// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
// fn and ->nbody will already have been typechecked.
-func caninl(fn *ir.Node) {
+func caninl(fn ir.Node) {
if fn.Op() != ir.ODCLFUNC {
base.Fatalf("caninl %v", fn)
}
visitor := hairyVisitor{
budget: inlineMaxBudget,
extraCallCost: cc,
- usedLocals: make(map[*ir.Node]bool),
+ usedLocals: make(map[ir.Node]bool),
}
if visitor.visitList(fn.Body()) {
reason = visitor.reason
// inlFlood marks n's inline body for export and recursively ensures
// all called functions are marked too.
-func inlFlood(n *ir.Node) {
+func inlFlood(n ir.Node) {
if n == nil {
return
}
// Recursively identify all referenced functions for
// reexport. We want to include even non-called functions,
// because after inlining they might be callable.
- ir.InspectList(ir.AsNodes(n.Func().Inl.Body), func(n *ir.Node) bool {
+ ir.InspectList(ir.AsNodes(n.Func().Inl.Body), func(n ir.Node) bool {
switch n.Op() {
case ir.OMETHEXPR:
inlFlood(methodExprName(n))
budget int32
reason string
extraCallCost int32
- usedLocals map[*ir.Node]bool
+ usedLocals map[ir.Node]bool
}
// Look for anything we want to punt on.
return false
}
-func (v *hairyVisitor) visit(n *ir.Node) bool {
+func (v *hairyVisitor) visit(n ir.Node) bool {
if n == nil {
return false
}
// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
// the body and dcls of an inlineable function.
-func inlcopylist(ll []*ir.Node) []*ir.Node {
- s := make([]*ir.Node, 0, len(ll))
+func inlcopylist(ll []ir.Node) []ir.Node {
+ s := make([]ir.Node, 0, len(ll))
for _, n := range ll {
s = append(s, inlcopy(n))
}
return s
}
-func inlcopy(n *ir.Node) *ir.Node {
+func inlcopy(n ir.Node) ir.Node {
if n == nil {
return nil
}
return m
}
-func countNodes(n *ir.Node) int {
+func countNodes(n ir.Node) int {
if n == nil {
return 0
}
// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point.
-func inlcalls(fn *ir.Node) {
+func inlcalls(fn ir.Node) {
savefn := Curfn
Curfn = fn
maxCost := int32(inlineMaxBudget)
// but allow inlining if there is a recursion cycle of many functions.
// Most likely, the inlining will stop before we even hit the beginning of
// the cycle again, but the map catches the unusual case.
- inlMap := make(map[*ir.Node]bool)
+ inlMap := make(map[ir.Node]bool)
fn = inlnode(fn, maxCost, inlMap)
if fn != Curfn {
base.Fatalf("inlnode replaced curfn")
}
// Turn an OINLCALL into a statement.
-func inlconv2stmt(n *ir.Node) {
+func inlconv2stmt(n ir.Node) {
n.SetOp(ir.OBLOCK)
// n->ninit stays
// Turn an OINLCALL into a single valued expression.
// The result of inlconv2expr MUST be assigned back to n, e.g.
// n.Left = inlconv2expr(n.Left)
-func inlconv2expr(n *ir.Node) *ir.Node {
+func inlconv2expr(n ir.Node) ir.Node {
r := n.Rlist().First()
return addinit(r, append(n.Init().Slice(), n.Body().Slice()...))
}
// containing the inlined statements on the first list element so
// order will be preserved. Used in return, oas2func and call
// statements.
-func inlconv2list(n *ir.Node) []*ir.Node {
+func inlconv2list(n ir.Node) []ir.Node {
if n.Op() != ir.OINLCALL || n.Rlist().Len() == 0 {
base.Fatalf("inlconv2list %+v\n", n)
}
return s
}
-func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[*ir.Node]bool) {
+func inlnodelist(l ir.Nodes, maxCost int32, inlMap map[ir.Node]bool) {
s := l.Slice()
for i := range s {
s[i] = inlnode(s[i], maxCost, inlMap)
// shorter and less complicated.
// The result of inlnode MUST be assigned back to n, e.g.
// n.Left = inlnode(n.Left)
-func inlnode(n *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node {
+func inlnode(n ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node {
if n == nil {
return n
}
// inlCallee takes a function-typed expression and returns the underlying function ONAME
// that it refers to if statically known. Otherwise, it returns nil.
-func inlCallee(fn *ir.Node) *ir.Node {
+func inlCallee(fn ir.Node) ir.Node {
fn = staticValue(fn)
switch {
case fn.Op() == ir.OMETHEXPR:
return nil
}
-func staticValue(n *ir.Node) *ir.Node {
+func staticValue(n ir.Node) ir.Node {
for {
if n.Op() == ir.OCONVNOP {
n = n.Left()
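The pattern staticValue resolves is a function value assigned exactly once
(illustrative):

	func add(x, y int) int { return x + y }

	func caller() int {
		f := add       // initialized once, never reassigned
		return f(1, 2) // staticValue(f) yields add, so the call can inline
	}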
// staticValue1 implements a simple SSA-like optimization. If n is a local variable
// that is initialized and never reassigned, staticValue1 returns the initializer
// expression. Otherwise, it returns nil.
-func staticValue1(n *ir.Node) *ir.Node {
+func staticValue1(n ir.Node) ir.Node {
if n.Op() != ir.ONAME || n.Class() != ir.PAUTO || n.Name().Addrtaken() {
return nil
}
return nil
}
- var rhs *ir.Node
+ var rhs ir.Node
FindRHS:
switch defn.Op() {
case ir.OAS:
// useful for -m output documenting the reason for inhibited optimizations.
// NB: global variables are always considered to be re-assigned.
// TODO: handle initial declaration not including an assignment and followed by a single assignment?
-func reassigned(n *ir.Node) (bool, *ir.Node) {
+func reassigned(n ir.Node) (bool, ir.Node) {
if n.Op() != ir.ONAME {
base.Fatalf("reassigned %v", n)
}
}
type reassignVisitor struct {
- name *ir.Node
+ name ir.Node
}
-func (v *reassignVisitor) visit(n *ir.Node) *ir.Node {
+func (v *reassignVisitor) visit(n ir.Node) ir.Node {
if n == nil {
return nil
}
return nil
}
-func (v *reassignVisitor) visitList(l ir.Nodes) *ir.Node {
+func (v *reassignVisitor) visitList(l ir.Nodes) ir.Node {
for _, n := range l.Slice() {
if a := v.visit(n); a != nil {
return a
return nil
}
-func inlParam(t *types.Field, as *ir.Node, inlvars map[*ir.Node]*ir.Node) *ir.Node {
+func inlParam(t *types.Field, as ir.Node, inlvars map[ir.Node]ir.Node) ir.Node {
n := ir.AsNode(t.Nname)
if n == nil || ir.IsBlank(n) {
return ir.BlankNode
// parameters.
// The result of mkinlcall MUST be assigned back to n, e.g.
// n.Left = mkinlcall(n.Left, fn, isddd)
-func mkinlcall(n, fn *ir.Node, maxCost int32, inlMap map[*ir.Node]bool) *ir.Node {
+func mkinlcall(n, fn ir.Node, maxCost int32, inlMap map[ir.Node]bool) ir.Node {
if fn.Func().Inl == nil {
if logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(Curfn),
}
// Make temp names to use instead of the originals.
- inlvars := make(map[*ir.Node]*ir.Node)
+ inlvars := make(map[ir.Node]ir.Node)
// record formals/locals for later post-processing
- var inlfvars []*ir.Node
+ var inlfvars []ir.Node
// Handle captured variables when inlining closures.
if fn.Name().Defn != nil {
}
nreturns := 0
- ir.InspectList(ir.AsNodes(fn.Func().Inl.Body), func(n *ir.Node) bool {
+ ir.InspectList(ir.AsNodes(fn.Func().Inl.Body), func(n ir.Node) bool {
if n != nil && n.Op() == ir.ORETURN {
nreturns++
}
delayretvars := nreturns == 1
// temporaries for return values.
- var retvars []*ir.Node
+ var retvars []ir.Node
for i, t := range fn.Type().Results().Fields().Slice() {
- var m *ir.Node
+ var m ir.Node
if n := ir.AsNode(t.Nname); n != nil && !ir.IsBlank(n) && !strings.HasPrefix(n.Sym().Name, "~r") {
m = inlvar(n)
m = typecheck(m, ctxExpr)
// For non-dotted calls to variadic functions, we assign the
// variadic parameter's temp name separately.
- var vas *ir.Node
+ var vas ir.Node
if recv := fn.Type().Recv(); recv != nil {
as.PtrList().Append(inlParam(recv, as, inlvars))
// Every time we expand a function we generate a new set of tmpnames,
// PAUTO's in the calling functions, and link them off of the
// PPARAM's, PAUTOS and PPARAMOUTs of the called function.
-func inlvar(var_ *ir.Node) *ir.Node {
+func inlvar(var_ ir.Node) ir.Node {
if base.Flag.LowerM > 3 {
fmt.Printf("inlvar %+v\n", var_)
}
}
// Synthesize a variable to store the inlined function's results in.
-func retvar(t *types.Field, i int) *ir.Node {
+func retvar(t *types.Field, i int) ir.Node {
n := NewName(lookupN("~R", i))
n.SetType(t.Type)
n.SetClass(ir.PAUTO)
// Synthesize a variable to store the inlined function's arguments
// when they come from a multiple return call.
-func argvar(t *types.Type, i int) *ir.Node {
+func argvar(t *types.Type, i int) ir.Node {
n := NewName(lookupN("~arg", i))
n.SetType(t.Elem())
n.SetClass(ir.PAUTO)
retlabel *types.Sym
// Temporary result variables.
- retvars []*ir.Node
+ retvars []ir.Node
// Whether result variables should be initialized at the
// "return" statement.
delayretvars bool
- inlvars map[*ir.Node]*ir.Node
+ inlvars map[ir.Node]ir.Node
// bases maps from original PosBase to PosBase with an extra
// inlined call frame.
}
// list inlines a list of nodes.
-func (subst *inlsubst) list(ll ir.Nodes) []*ir.Node {
- s := make([]*ir.Node, 0, ll.Len())
+func (subst *inlsubst) list(ll ir.Nodes) []ir.Node {
+ s := make([]ir.Node, 0, ll.Len())
for _, n := range ll.Slice() {
s = append(s, subst.node(n))
}
// inlined function, substituting references to input/output
// parameters with ones to the tmpnames, and substituting returns with
// assignments to the output.
-func (subst *inlsubst) node(n *ir.Node) *ir.Node {
+func (subst *inlsubst) node(n ir.Node) ir.Node {
if n == nil {
return nil
}
return base.Ctxt.PosTable.XPos(pos)
}
-func pruneUnusedAutos(ll []*ir.Node, vis *hairyVisitor) []*ir.Node {
- s := make([]*ir.Node, 0, len(ll))
+func pruneUnusedAutos(ll []ir.Node, vis *hairyVisitor) []ir.Node {
+ s := make([]ir.Node, 0, len(ll))
for _, n := range ll {
if n.Class() == ir.PAUTO {
if _, found := vis.usedLocals[n]; !found {
// devirtualize replaces interface method calls within fn with direct
// concrete-type method calls where applicable.
-func devirtualize(fn *ir.Node) {
+func devirtualize(fn ir.Node) {
Curfn = fn
- ir.InspectList(fn.Body(), func(n *ir.Node) bool {
+ ir.InspectList(fn.Body(), func(n ir.Node) bool {
if n.Op() == ir.OCALLINTER {
devirtualizeCall(n)
}
})
}
-func devirtualizeCall(call *ir.Node) {
+func devirtualizeCall(call ir.Node) {
recv := staticValue(call.Left().Left())
if recv.Op() != ir.OCONVIFACE {
return
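The calls it rewrites look like this (an illustrative fragment; buf is a
hypothetical []byte):

	var r io.Reader = strings.NewReader("hi") // OCONVIFACE from a known concrete type
	n, err := r.Read(buf)                     // OCALLINTER rewritable to a direct
	                                          // call to (*strings.Reader).Read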
if base.Flag.LowerL != 0 {
// Find functions that can be inlined and clone them before walk expands them.
- visitBottomUp(xtop, func(list []*ir.Node, recursive bool) {
+ visitBottomUp(xtop, func(list []ir.Node, recursive bool) {
numfns := numNonClosures(list)
for _, n := range list {
if !recursive || numfns > 1 {
}
// numNonClosures returns the number of functions in list which are not closures.
-func numNonClosures(list []*ir.Node) int {
+func numNonClosures(list []ir.Node) int {
count := 0
for _, n := range list {
if n.Func().OClosure == nil {
}
}
}
- return fmt.Sprintf("[]*ir.Node{%s}", strings.Join(res, ", "))
+ return fmt.Sprintf("[]ir.Node{%s}", strings.Join(res, ", "))
}
func intconst(e ast.Expr) int64 {
lastCloseScopePos syntax.Pos
}
-func (p *noder) funcBody(fn *ir.Node, block *syntax.BlockStmt) {
+func (p *noder) funcBody(fn ir.Node, block *syntax.BlockStmt) {
oldScope := p.scope
p.scope = 0
funchdr(fn)
if block != nil {
body := p.stmts(block.List)
if body == nil {
- body = []*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)}
+ body = []ir.Node{ir.Nod(ir.OEMPTY, nil, nil)}
}
fn.PtrBody().Set(body)
clearImports()
}
-func (p *noder) decls(decls []syntax.Decl) (l []*ir.Node) {
+func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) {
var cs constState
for _, decl := range decls {
my.Block = 1 // at top level
}
-func (p *noder) varDecl(decl *syntax.VarDecl) []*ir.Node {
+func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
names := p.declNames(decl.NameList)
typ := p.typeExprOrNil(decl.Type)
- var exprs []*ir.Node
+ var exprs []ir.Node
if decl.Values != nil {
exprs = p.exprList(decl.Values)
}
// constant declarations are handled correctly (e.g., issue 15550).
type constState struct {
group *syntax.Group
- typ *ir.Node
- values []*ir.Node
+ typ ir.Node
+ values []ir.Node
iota int64
}
-func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []*ir.Node {
+func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
if decl.Group == nil || decl.Group != cs.group {
*cs = constState{
group: decl.Group,
names := p.declNames(decl.NameList)
typ := p.typeExprOrNil(decl.Type)
- var values []*ir.Node
+ var values []ir.Node
if decl.Values != nil {
values = p.exprList(decl.Values)
cs.typ, cs.values = typ, values
typ, values = cs.typ, cs.values
}
- nn := make([]*ir.Node, 0, len(names))
+ nn := make([]ir.Node, 0, len(names))
for i, n := range names {
if i >= len(values) {
base.Errorf("missing value in const declaration")
return nn
}
-func (p *noder) typeDecl(decl *syntax.TypeDecl) *ir.Node {
+func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node {
n := p.declName(decl.Name)
n.SetOp(ir.OTYPE)
declare(n, dclcontext)
return nod
}
-func (p *noder) declNames(names []*syntax.Name) []*ir.Node {
- nodes := make([]*ir.Node, 0, len(names))
+func (p *noder) declNames(names []*syntax.Name) []ir.Node {
+ nodes := make([]ir.Node, 0, len(names))
for _, name := range names {
nodes = append(nodes, p.declName(name))
}
return nodes
}
-func (p *noder) declName(name *syntax.Name) *ir.Node {
+func (p *noder) declName(name *syntax.Name) ir.Node {
n := dclname(p.name(name))
n.SetPos(p.pos(name))
return n
}
-func (p *noder) funcDecl(fun *syntax.FuncDecl) *ir.Node {
+func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
name := p.name(fun.Name)
t := p.signature(fun.Recv, fun.Type)
f := p.nod(fun, ir.ODCLFUNC, nil, nil)
return f
}
-func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.Node {
+func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) ir.Node {
n := p.nod(typ, ir.OTFUNC, nil, nil)
if recv != nil {
n.SetLeft(p.param(recv, false, false))
return n
}
-func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Node {
- nodes := make([]*ir.Node, 0, len(params))
+func (p *noder) params(params []*syntax.Field, dddOk bool) []ir.Node {
+ nodes := make([]ir.Node, 0, len(params))
for i, param := range params {
p.setlineno(param)
nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
return nodes
}
-func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Node {
+func (p *noder) param(param *syntax.Field, dddOk, final bool) ir.Node {
var name *types.Sym
if param.Name != nil {
name = p.name(param.Name)
return n
}
-func (p *noder) exprList(expr syntax.Expr) []*ir.Node {
+func (p *noder) exprList(expr syntax.Expr) []ir.Node {
if list, ok := expr.(*syntax.ListExpr); ok {
return p.exprs(list.ElemList)
}
- return []*ir.Node{p.expr(expr)}
+ return []ir.Node{p.expr(expr)}
}
-func (p *noder) exprs(exprs []syntax.Expr) []*ir.Node {
- nodes := make([]*ir.Node, 0, len(exprs))
+func (p *noder) exprs(exprs []syntax.Expr) []ir.Node {
+ nodes := make([]ir.Node, 0, len(exprs))
for _, expr := range exprs {
nodes = append(nodes, p.expr(expr))
}
return nodes
}
-func (p *noder) expr(expr syntax.Expr) *ir.Node {
+func (p *noder) expr(expr syntax.Expr) ir.Node {
p.setlineno(expr)
switch expr := expr.(type) {
case nil, *syntax.BadExpr:
op = ir.OSLICE3
}
n := p.nod(expr, op, p.expr(expr.X), nil)
- var index [3]*ir.Node
+ var index [3]ir.Node
for i, x := range &expr.Index {
if x != nil {
index[i] = p.expr(x)
return n
case *syntax.ArrayType:
- var len *ir.Node
+ var len ir.Node
if expr.Len != nil {
len = p.expr(expr.Len)
} else {
// sum efficiently handles very large summation expressions (such as
// in issue #16394). In particular, it avoids left recursion and
// collapses string literals.
-func (p *noder) sum(x syntax.Expr) *ir.Node {
+func (p *noder) sum(x syntax.Expr) ir.Node {
// While we need to handle long sums with asymptotic
// efficiency, the vast majority of sums are very small: ~95%
// have only 2 or 3 operands, and ~99% of string literals are
// handle correctly. For now, we avoid these problems by
// treating named string constants the same as non-constant
// operands.
- var nstr *ir.Node
+ var nstr ir.Node
chunks := make([]string, 0, 1)
n := p.expr(x)
return n
}
-func (p *noder) typeExpr(typ syntax.Expr) *ir.Node {
+func (p *noder) typeExpr(typ syntax.Expr) ir.Node {
// TODO(mdempsky): Be stricter? typecheck should handle errors anyway.
return p.expr(typ)
}
-func (p *noder) typeExprOrNil(typ syntax.Expr) *ir.Node {
+func (p *noder) typeExprOrNil(typ syntax.Expr) ir.Node {
if typ != nil {
return p.expr(typ)
}
panic("unhandled ChanDir")
}
-func (p *noder) structType(expr *syntax.StructType) *ir.Node {
- l := make([]*ir.Node, 0, len(expr.FieldList))
+func (p *noder) structType(expr *syntax.StructType) ir.Node {
+ l := make([]ir.Node, 0, len(expr.FieldList))
for i, field := range expr.FieldList {
p.setlineno(field)
- var n *ir.Node
+ var n ir.Node
if field.Name == nil {
n = p.embedded(field.Type)
} else {
return n
}
-func (p *noder) interfaceType(expr *syntax.InterfaceType) *ir.Node {
- l := make([]*ir.Node, 0, len(expr.MethodList))
+func (p *noder) interfaceType(expr *syntax.InterfaceType) ir.Node {
+ l := make([]ir.Node, 0, len(expr.MethodList))
for _, method := range expr.MethodList {
p.setlineno(method)
- var n *ir.Node
+ var n ir.Node
if method.Name == nil {
n = p.nodSym(method, ir.ODCLFIELD, importName(p.packname(method.Type)), nil)
} else {
panic(fmt.Sprintf("unexpected packname: %#v", expr))
}
-func (p *noder) embedded(typ syntax.Expr) *ir.Node {
+func (p *noder) embedded(typ syntax.Expr) ir.Node {
op, isStar := typ.(*syntax.Operation)
if isStar {
if op.Op != syntax.Mul || op.Y != nil {
return n
}
-func (p *noder) stmts(stmts []syntax.Stmt) []*ir.Node {
+func (p *noder) stmts(stmts []syntax.Stmt) []ir.Node {
return p.stmtsFall(stmts, false)
}
-func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*ir.Node {
- var nodes []*ir.Node
+func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node {
+ var nodes []ir.Node
for i, stmt := range stmts {
s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
if s == nil {
return nodes
}
-func (p *noder) stmt(stmt syntax.Stmt) *ir.Node {
+func (p *noder) stmt(stmt syntax.Stmt) ir.Node {
return p.stmtFall(stmt, false)
}
-func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *ir.Node {
+func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
p.setlineno(stmt)
switch stmt := stmt.(type) {
case *syntax.EmptyStmt:
}
return p.nod(stmt, op, p.expr(stmt.Call), nil)
case *syntax.ReturnStmt:
- var results []*ir.Node
+ var results []ir.Node
if stmt.Results != nil {
results = p.exprList(stmt.Results)
}
panic("unhandled Stmt")
}
-func (p *noder) assignList(expr syntax.Expr, defn *ir.Node, colas bool) []*ir.Node {
+func (p *noder) assignList(expr syntax.Expr, defn ir.Node, colas bool) []ir.Node {
if !colas {
return p.exprList(expr)
}
exprs = []syntax.Expr{expr}
}
- res := make([]*ir.Node, len(exprs))
+ res := make([]ir.Node, len(exprs))
seen := make(map[*types.Sym]bool, len(exprs))
newOrErr := false
return res
}
-func (p *noder) blockStmt(stmt *syntax.BlockStmt) []*ir.Node {
+func (p *noder) blockStmt(stmt *syntax.BlockStmt) []ir.Node {
p.openScope(stmt.Pos())
nodes := p.stmts(stmt.List)
p.closeScope(stmt.Rbrace)
return nodes
}
-func (p *noder) ifStmt(stmt *syntax.IfStmt) *ir.Node {
+func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
p.openScope(stmt.Pos())
n := p.nod(stmt, ir.OIF, nil, nil)
if stmt.Init != nil {
return n
}
-func (p *noder) forStmt(stmt *syntax.ForStmt) *ir.Node {
+func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
p.openScope(stmt.Pos())
- var n *ir.Node
+ var n ir.Node
if r, ok := stmt.Init.(*syntax.RangeClause); ok {
if stmt.Cond != nil || stmt.Post != nil {
panic("unexpected RangeClause")
return n
}
-func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *ir.Node {
+func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
p.openScope(stmt.Pos())
n := p.nod(stmt, ir.OSWITCH, nil, nil)
if stmt.Init != nil {
return n
}
-func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.Node, rbrace syntax.Pos) []*ir.Node {
- nodes := make([]*ir.Node, 0, len(clauses))
+func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch ir.Node, rbrace syntax.Pos) []ir.Node {
+ nodes := make([]ir.Node, 0, len(clauses))
for i, clause := range clauses {
p.setlineno(clause)
if i > 0 {
return nodes
}
-func (p *noder) selectStmt(stmt *syntax.SelectStmt) *ir.Node {
+func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node {
n := p.nod(stmt, ir.OSELECT, nil, nil)
n.PtrList().Set(p.commClauses(stmt.Body, stmt.Rbrace))
return n
}
-func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.Node {
- nodes := make([]*ir.Node, 0, len(clauses))
+func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []ir.Node {
+ nodes := make([]ir.Node, 0, len(clauses))
for i, clause := range clauses {
p.setlineno(clause)
if i > 0 {
return nodes
}
-func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *ir.Node {
+func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
lhs := p.nodSym(label, ir.OLABEL, nil, p.name(label.Label))
- var ls *ir.Node
+ var ls ir.Node
if label.Stmt != nil { // TODO(mdempsky): Should always be present.
ls = p.stmtFall(label.Stmt, fallOK)
}
lhs.Name().Defn = ls
- l := []*ir.Node{lhs}
+ l := []ir.Node{lhs}
if ls != nil {
if ls.Op() == ir.OBLOCK && ls.Init().Len() == 0 {
l = append(l, ls.List().Slice()...)
return lookup(name.Value)
}
-func (p *noder) mkname(name *syntax.Name) *ir.Node {
+func (p *noder) mkname(name *syntax.Name) ir.Node {
// TODO(mdempsky): Set line number?
return mkname(p.name(name))
}
-func (p *noder) wrapname(n syntax.Node, x *ir.Node) *ir.Node {
+func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
// These nodes do not carry line numbers.
// Introduce a wrapper node to give them the correct line.
switch x.Op() {
return x
}
-func (p *noder) nod(orig syntax.Node, op ir.Op, left, right *ir.Node) *ir.Node {
+func (p *noder) nod(orig syntax.Node, op ir.Op, left, right ir.Node) ir.Node {
return ir.NodAt(p.pos(orig), op, left, right)
}
-func (p *noder) nodSym(orig syntax.Node, op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node {
+func (p *noder) nodSym(orig syntax.Node, op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
n := nodSym(op, left, sym)
n.SetPos(p.pos(orig))
return n
return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
}
-func mkname(sym *types.Sym) *ir.Node {
+func mkname(sym *types.Sym) ir.Node {
n := oldname(sym)
if n.Name() != nil && n.Name().Pack != nil {
n.Name().Pack.Name().SetUsed(true)
}
}
-func dumpGlobal(n *ir.Node) {
+func dumpGlobal(n ir.Node) {
if n.Type() == nil {
base.Fatalf("external %v nil type\n", n)
}
ggloblnod(n)
}
-func dumpGlobalConst(n *ir.Node) {
+func dumpGlobalConst(n ir.Node) {
// only export typed constants
t := n.Type()
if t == nil {
var slicedataGen int
-func slicedata(pos src.XPos, s string) *ir.Node {
+func slicedata(pos src.XPos, s string) ir.Node {
slicedataGen++
symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
sym := ir.LocalPkg.Lookup(symname)
return symnode
}
-func slicebytes(nam *ir.Node, s string) {
+func slicebytes(nam ir.Node, s string) {
if nam.Op() != ir.ONAME {
base.Fatalf("slicebytes %v", nam)
}
// slicesym writes a static slice symbol {&arr, lencap, lencap} to n.
// arr must be an ONAME. slicesym does not modify n.
-func slicesym(n, arr *ir.Node, lencap int64) {
+func slicesym(n, arr ir.Node, lencap int64) {
s := n.Sym().Linksym()
off := n.Offset()
if arr.Op() != ir.ONAME {
// addrsym writes the static address of a to n. a must be an ONAME.
// Neither n nor a is modified.
-func addrsym(n, a *ir.Node) {
+func addrsym(n, a ir.Node) {
if n.Op() != ir.ONAME {
base.Fatalf("addrsym n op %v", n.Op())
}
// pfuncsym writes the static address of f to n. f must be a global function.
// Neither n nor f is modified.
-func pfuncsym(n, f *ir.Node) {
+func pfuncsym(n, f ir.Node) {
if n.Op() != ir.ONAME {
base.Fatalf("pfuncsym n op %v", n.Op())
}
// litsym writes the static literal c to n.
// Neither n nor c is modified.
-func litsym(n, c *ir.Node, wid int) {
+func litsym(n, c ir.Node, wid int) {
if n.Op() != ir.ONAME {
base.Fatalf("litsym n op %v", n.Op())
}
// Order holds state during the ordering process.
type Order struct {
- out []*ir.Node // list of generated statements
- temp []*ir.Node // stack of temporary variables
- free map[string][]*ir.Node // free list of unused temporaries, by type.LongString().
+ out []ir.Node // list of generated statements
+ temp []ir.Node // stack of temporary variables
+ free map[string][]ir.Node // free list of unused temporaries, by type.LongString().
}
// Order rewrites fn.Nbody to apply the ordering constraints
// described in the comment at the top of the file.
-func order(fn *ir.Node) {
+func order(fn ir.Node) {
if base.Flag.W > 1 {
s := fmt.Sprintf("\nbefore order %v", fn.Func().Nname.Sym())
ir.DumpList(s, fn.Body())
}
- orderBlock(fn.PtrBody(), map[string][]*ir.Node{})
+ orderBlock(fn.PtrBody(), map[string][]ir.Node{})
}
// newTemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, newTemp emits code to zero the temporary.
-func (o *Order) newTemp(t *types.Type, clear bool) *ir.Node {
- var v *ir.Node
+func (o *Order) newTemp(t *types.Type, clear bool) ir.Node {
+ var v ir.Node
// Note: LongString is close to the type equality we want,
// but not exactly. We still need to double-check with types.Identical.
key := t.LongString()
// (The other candidate would be map access, but map access
// returns a pointer to the result data instead of taking a pointer
// to be filled in.)
-func (o *Order) copyExpr(n *ir.Node, t *types.Type, clear bool) *ir.Node {
+func (o *Order) copyExpr(n ir.Node, t *types.Type, clear bool) ir.Node {
v := o.newTemp(t, clear)
a := ir.Nod(ir.OAS, v, n)
a = typecheck(a, ctxStmt)
// The definition of cheap is that n is a variable or constant.
// If not, cheapExpr allocates a new tmp, emits tmp = n,
// and then returns tmp.
-func (o *Order) cheapExpr(n *ir.Node) *ir.Node {
+func (o *Order) cheapExpr(n ir.Node) ir.Node {
if n == nil {
return nil
}
// as assigning to the original n.
//
// The intended use is to apply to x when rewriting x += y into x = x + y.
-func (o *Order) safeExpr(n *ir.Node) *ir.Node {
+func (o *Order) safeExpr(n ir.Node) ir.Node {
switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
return typecheck(a, ctxExpr)
case ir.OINDEX, ir.OINDEXMAP:
- var l *ir.Node
+ var l ir.Node
if n.Left().Type().IsArray() {
l = o.safeExpr(n.Left())
} else {
// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
// because we emit explicit VARKILL instructions marking the end of those
// temporaries' lifetimes.
-func isaddrokay(n *ir.Node) bool {
+func isaddrokay(n ir.Node) bool {
return islvalue(n) && (n.Op() != ir.ONAME || n.Class() == ir.PEXTERN || ir.IsAutoTmp(n))
}
// tmp = n, and then returns tmp.
// The result of addrTemp MUST be assigned back to n, e.g.
// n.Left = o.addrTemp(n.Left)
-func (o *Order) addrTemp(n *ir.Node) *ir.Node {
+func (o *Order) addrTemp(n ir.Node) ir.Node {
if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
// TODO: expand this to all static composite literal nodes?
n = defaultlit(n, nil)
// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
// It should only be used for map runtime calls which have *_fast* versions.
-func (o *Order) mapKeyTemp(t *types.Type, n *ir.Node) *ir.Node {
+func (o *Order) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
// Most map calls need to take the address of the key.
// Exception: map*_fast* calls. See golang.org/issue/19015.
if mapfast(t) == mapslow {
// It would be nice to handle these generally, but because
// []byte keys are not allowed in maps, the use of string(k)
// comes up in important cases in practice. See issue 3512.
-func mapKeyReplaceStrConv(n *ir.Node) bool {
+func mapKeyReplaceStrConv(n ir.Node) bool {
var replaced bool
switch n.Op() {
case ir.OBYTES2STR:
// cleanTempNoPop emits VARKILL instructions to *out
// for each temporary above the mark on the temporary stack.
// It does not pop the temporaries from the stack.
-func (o *Order) cleanTempNoPop(mark ordermarker) []*ir.Node {
- var out []*ir.Node
+func (o *Order) cleanTempNoPop(mark ordermarker) []ir.Node {
+ var out []ir.Node
for i := len(o.temp) - 1; i >= int(mark); i-- {
n := o.temp[i]
kill := ir.Nod(ir.OVARKILL, n, nil)
// m = OMAKESLICE([]T, x); OCOPY(m, s)
// and rewrites it to:
// m = OMAKESLICECOPY([]T, x, s); nil
-func orderMakeSliceCopy(s []*ir.Node) {
+func orderMakeSliceCopy(s []ir.Node) {
if base.Flag.N != 0 || instrumenting {
return
}
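In source terms, the pattern and its rewrite are (illustrative, for some element
type T):

	m := make([]T, len(s))
	copy(m, s)

After ordering, these fuse into one OMAKESLICECOPY, so the runtime can allocate
and copy in a single step instead of zeroing m and then overwriting it.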
// orderBlock orders the block of statements in n into a new slice,
// and then replaces the old slice in n with the new slice.
// free is a map that can be used to obtain temporary variables by type.
-func orderBlock(n *ir.Nodes, free map[string][]*ir.Node) {
+func orderBlock(n *ir.Nodes, free map[string][]ir.Node) {
var order Order
order.free = free
mark := order.markTemp()
// leaves them as the init list of the final *np.
// The result of exprInPlace MUST be assigned back to n, e.g.
// n.Left = o.exprInPlace(n.Left)
-func (o *Order) exprInPlace(n *ir.Node) *ir.Node {
+func (o *Order) exprInPlace(n ir.Node) ir.Node {
var order Order
order.free = o.free
n = order.expr(n, nil)
// The result of orderStmtInPlace MUST be assigned back to n, e.g.
// n.Left = orderStmtInPlace(n.Left)
// free is a map that can be used to obtain temporary variables by type.
-func orderStmtInPlace(n *ir.Node, free map[string][]*ir.Node) *ir.Node {
+func orderStmtInPlace(n ir.Node, free map[string][]ir.Node) ir.Node {
var order Order
order.free = free
mark := order.markTemp()
}
// init moves n's init list to o.out.
-func (o *Order) init(n *ir.Node) {
+func (o *Order) init(n ir.Node) {
if ir.MayBeShared(n) {
// For concurrency safety, don't mutate potentially shared nodes.
// First, ensure that no work is required here.
// call orders the call expression n.
// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
-func (o *Order) call(n *ir.Node) {
+func (o *Order) call(n ir.Node) {
if n.Init().Len() > 0 {
// Caller should have already called o.init(n).
base.Fatalf("%v with unexpected ninit", n.Op())
if n.Op() == ir.OCALLINTER {
return
}
- keepAlive := func(arg *ir.Node) {
+ keepAlive := func(arg ir.Node) {
// If the argument is really a pointer being converted to uintptr,
// arrange for the pointer to be kept alive until the call returns,
// by copying it into a temp and marking that temp
// cases they are also typically registerizable, so not much harm done.
// And this only applies to the multiple-assignment form.
// We could do a more precise analysis if needed, like in walk.go.
-func (o *Order) mapAssign(n *ir.Node) {
+func (o *Order) mapAssign(n ir.Node) {
switch n.Op() {
default:
base.Fatalf("order.mapAssign %v", n.Op())
o.out = append(o.out, n)
case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2MAPR, ir.OAS2FUNC:
- var post []*ir.Node
+ var post []ir.Node
for i, m := range n.List().Slice() {
switch {
case m.Op() == ir.OINDEXMAP:
// stmt orders the statement n, appending to o.out.
// Temporaries created during the statement are cleaned
// up using VARKILL instructions as possible.
-func (o *Order) stmt(n *ir.Node) {
+func (o *Order) stmt(n ir.Node) {
if n == nil {
return
}
base.Pos = lno
}
-func hasDefaultCase(n *ir.Node) bool {
+func hasDefaultCase(n ir.Node) bool {
for _, ncas := range n.List().Slice() {
if ncas.Op() != ir.OCASE {
base.Fatalf("expected case, found %v", ncas.Op())
}
// prealloc[x] records the allocation to use for x.
-var prealloc = map[*ir.Node]*ir.Node{}
+var prealloc = map[ir.Node]ir.Node{}
// expr orders a single expression, appending side
// effects to o.out as needed.
// to avoid copying the result of the expression to a temporary.)
// The result of expr MUST be assigned back to n, e.g.
// n.Left = o.expr(n.Left, lhs)
-func (o *Order) expr(n, lhs *ir.Node) *ir.Node {
+func (o *Order) expr(n, lhs ir.Node) ir.Node {
if n == nil {
return n
}
// See issue 26552.
entries := n.List().Slice()
statics := entries[:0]
- var dynamics []*ir.Node
+ var dynamics []ir.Node
for _, r := range entries {
if r.Op() != ir.OKEY {
base.Fatalf("OMAPLIT entry not OKEY: %v\n", r)
// okas creates and returns an assignment of val to ok,
// including an explicit conversion if necessary.
-func okas(ok, val *ir.Node) *ir.Node {
+func okas(ok, val ir.Node) ir.Node {
if !ir.IsBlank(ok) {
val = conv(val, ok.Type())
}
// tmp1, tmp2, tmp3 = ...
// a, b, a = tmp1, tmp2, tmp3
// This is necessary to ensure left to right assignment order.
-func (o *Order) as2(n *ir.Node) {
- tmplist := []*ir.Node{}
- left := []*ir.Node{}
+func (o *Order) as2(n ir.Node) {
+ tmplist := []ir.Node{}
+ left := []ir.Node{}
for ni, l := range n.List().Slice() {
if !ir.IsBlank(l) {
tmp := o.newTemp(l.Type(), l.Type().HasPointers())
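The aliasing hazard that forces the temporaries (illustrative):

	x := []int{1, 2}
	i := 0
	i, x[i] = 1, 9 // spec: the index in x[i] uses the old i,
	               // so x[0] becomes 9 and i becomes 1

Evaluating every right-hand side into a temp first and assigning afterwards is
what preserves this left-to-right meaning.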
// okAs2 orders OAS2XXX with ok.
// Just like as2, this also adds temporaries to ensure left-to-right assignment.
-func (o *Order) okAs2(n *ir.Node) {
- var tmp1, tmp2 *ir.Node
+func (o *Order) okAs2(n ir.Node) {
+ var tmp1, tmp2 ir.Node
if !ir.IsBlank(n.List().First()) {
typ := n.Right().Type()
tmp1 = o.newTemp(typ, typ.HasPointers())
// "Portable" code generation.
var (
- compilequeue []*ir.Node // functions waiting to be compiled
+ compilequeue []ir.Node // functions waiting to be compiled
)
-func emitptrargsmap(fn *ir.Node) {
+func emitptrargsmap(fn ir.Node) {
if ir.FuncName(fn) == "_" || fn.Func().Nname.Sym().Linkname != "" {
return
}
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
-func cmpstackvarlt(a, b *ir.Node) bool {
+func cmpstackvarlt(a, b ir.Node) bool {
if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) {
return b.Class() == ir.PAUTO
}
}
// byStackVar implements sort.Interface for []ir.Node using cmpstackvarlt.
-type byStackVar []*ir.Node
+type byStackVar []ir.Node
func (s byStackVar) Len() int { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
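Callers sort a function's declarations through the standard sort package; a one-line usage sketch (the receiver expression is illustrative):

    sort.Sort(byStackVar(fn.Func().Dcl))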
scratchUsed := false
for _, b := range f.Blocks {
for _, v := range b.Values {
- if n, ok := v.Aux.(*ir.Node); ok {
+ if n, ok := v.Aux.(ir.Node); ok {
switch n.Class() {
case ir.PPARAM, ir.PPARAMOUT:
// Don't modify nodfp; it is a global.
s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
-func funccompile(fn *ir.Node) {
+func funccompile(fn ir.Node) {
if Curfn != nil {
base.Fatalf("funccompile %v inside %v", fn.Func().Nname.Sym(), Curfn.Func().Nname.Sym())
}
dclcontext = ir.PEXTERN
}
-func compile(fn *ir.Node) {
+func compile(fn ir.Node) {
errorsBefore := base.Errors()
order(fn)
if base.Errors() > errorsBefore {
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
-func compilenow(fn *ir.Node) bool {
+func compilenow(fn ir.Node) bool {
// Issue 38068: if this function is a method AND an inline
// candidate AND was not inlined (yet), put it onto the compile
// queue instead of compiling it immediately. This is in case we
// isInlinableButNotInlined returns true if 'fn' was marked as an
// inline candidate but then never inlined (presumably because we
// found no call sites).
-func isInlinableButNotInlined(fn *ir.Node) bool {
+func isInlinableButNotInlined(fn ir.Node) bool {
if fn.Func().Nname.Func().Inl == nil {
return false
}
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
-func compileSSA(fn *ir.Node, worker int) {
+func compileSSA(fn ir.Node, worker int) {
f := buildssa(fn, worker)
// Note: check arg size to fix issue 25507.
if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
sizeCalculationDisabled = true // not safe to calculate sizes concurrently
if race.Enabled {
// Randomize compilation order to try to shake out races.
- tmp := make([]*ir.Node, len(compilequeue))
+ tmp := make([]ir.Node, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
}
var wg sync.WaitGroup
base.Ctxt.InParallel = true
- c := make(chan *ir.Node, base.Flag.LowerC)
+ c := make(chan ir.Node, base.Flag.LowerC)
for i := 0; i < base.Flag.LowerC; i++ {
wg.Add(1)
go func(worker int) {
}
func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
- fn := curfn.(*ir.Node)
+ fn := curfn.(ir.Node)
if fn.Func().Nname != nil {
if expect := fn.Func().Nname.Sym().Linksym(); fnsym != expect {
base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
// Deciding the right answer is, as they say, future work.
isODCLFUNC := fn.Op() == ir.ODCLFUNC
- var apdecls []*ir.Node
+ var apdecls []ir.Node
// Populate decls for fn.
if isODCLFUNC {
for _, n := range fn.Func().Dcl {
return scopes, inlcalls
}
-func declPos(decl *ir.Node) src.XPos {
+func declPos(decl ir.Node) src.XPos {
if decl.Name().Defn != nil && (decl.Name().Captured() || decl.Name().Byval()) {
// It's not clear which position is correct for captured variables here:
// * decl.Pos is the wrong position for captured variables, in the inner
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
-func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Node) ([]*ir.Node, []*dwarf.Var, map[*ir.Node]bool) {
+func createSimpleVars(fnsym *obj.LSym, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) {
var vars []*dwarf.Var
- var decls []*ir.Node
- selected := make(map[*ir.Node]bool)
+ var decls []ir.Node
+ selected := make(map[ir.Node]bool)
for _, n := range apDecls {
if ir.IsAutoTmp(n) {
continue
return decls, vars, selected
}
-func createSimpleVar(fnsym *obj.LSym, n *ir.Node) *dwarf.Var {
+func createSimpleVar(fnsym *obj.LSym, n ir.Node) *dwarf.Var {
var abbrev int
offs := n.Offset()
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
-func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Node, []*dwarf.Var, map[*ir.Node]bool) {
+func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]ir.Node, []*dwarf.Var, map[ir.Node]bool) {
debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
// Produce a DWARF variable entry for each user variable.
- var decls []*ir.Node
+ var decls []ir.Node
var vars []*dwarf.Var
- ssaVars := make(map[*ir.Node]bool)
+ ssaVars := make(map[ir.Node]bool)
for varID, dvar := range debugInfo.Vars {
n := dvar
// createDwarfVars processes fn, returning a list of DWARF variables and the
// Nodes they represent.
-func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Node) ([]*ir.Node, []*dwarf.Var) {
+func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []ir.Node) ([]ir.Node, []*dwarf.Var) {
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
- var decls []*ir.Node
- var selected map[*ir.Node]bool
+ var decls []ir.Node
+ var selected map[ir.Node]bool
if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
-func preInliningDcls(fnsym *obj.LSym) []*ir.Node {
- fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Node)
- var rdcl []*ir.Node
+func preInliningDcls(fnsym *obj.LSym) []ir.Node {
+ fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(ir.Node)
+ var rdcl []ir.Node
for _, n := range fn.Func().Inl.Dcl {
c := n.Sym().Name[0]
// Avoid reporting "_" parameters, since if there are more than
return t
}
-func markUsed(n *ir.Node) *ir.Node {
+func markUsed(n ir.Node) ir.Node {
n.Name().SetUsed(true)
return n
}
-func markNeedZero(n *ir.Node) *ir.Node {
+func markNeedZero(n ir.Node) ir.Node {
n.Name().SetNeedzero(true)
return n
}
// Test all code paths for cmpstackvarlt.
func TestCmpstackvar(t *testing.T) {
- nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Node {
+ nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node {
if s == nil {
s = &types.Sym{Name: "."}
}
return n
}
testdata := []struct {
- a, b *ir.Node
+ a, b ir.Node
lt bool
}{
{
}
func TestStackvarSort(t *testing.T) {
- nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Node {
+ nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) ir.Node {
n := NewName(s)
n.SetType(t)
n.SetOffset(xoffset)
n.SetClass(cl)
return n
}
- inp := []*ir.Node{
+ inp := []ir.Node{
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(0, &types.Type{}, &types.Sym{}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(0, &types.Type{}, &types.Sym{Name: "abc"}, ir.PAUTO),
nod(0, &types.Type{}, &types.Sym{Name: "xyz"}, ir.PAUTO),
}
- want := []*ir.Node{
+ want := []ir.Node{
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(0, &types.Type{}, &types.Sym{}, ir.PFUNC),
nod(10, &types.Type{}, &types.Sym{}, ir.PFUNC),
}
type phiState struct {
- s *state // SSA state
- f *ssa.Func // function to work on
- defvars []map[*ir.Node]*ssa.Value // defined variables at end of each block
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
- varnum map[*ir.Node]int32 // variable numbering
+ varnum map[ir.Node]int32 // variable numbering
// properties of the dominator tree
idom []*ssa.Block // dominator parents
// Find all the variables for which we need to match up reads & writes.
// This step prunes any basic-block-only variables from consideration.
// Generate a numbering for these variables.
- s.varnum = map[*ir.Node]int32{}
- var vars []*ir.Node
+ s.varnum = map[ir.Node]int32{}
+ var vars []ir.Node
var vartypes []*types.Type
for _, b := range s.f.Blocks {
for _, v := range b.Values {
if v.Op != ssa.OpFwdRef {
continue
}
- var_ := v.Aux.(*ir.Node)
+ var_ := v.Aux.(ir.Node)
// Optimization: look back 1 block for the definition.
if len(b.Preds) == 1 {
}
}
-func (s *phiState) insertVarPhis(n int, var_ *ir.Node, defs []*ssa.Block, typ *types.Type) {
+func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *types.Type) {
priq := &s.priq
q := s.q
queued := s.queued
if v.Op != ssa.OpFwdRef {
continue
}
- n := s.varnum[v.Aux.(*ir.Node)]
+ n := s.varnum[v.Aux.(ir.Node)]
v.Op = ssa.OpCopy
v.Aux = nil
v.AddArg(values[n])
// Variant to use for small functions.
type simplePhiState struct {
- s *state // SSA state
- f *ssa.Func // function to work on
- fwdrefs []*ssa.Value // list of FwdRefs to be processed
- defvars []map[*ir.Node]*ssa.Value // defined variables at end of each block
- reachable []bool // which blocks are reachable
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ fwdrefs []*ssa.Value // list of FwdRefs to be processed
+ defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
+ reachable []bool // which blocks are reachable
}
func (s *simplePhiState) insertPhis() {
continue
}
s.fwdrefs = append(s.fwdrefs, v)
- var_ := v.Aux.(*ir.Node)
+ var_ := v.Aux.(ir.Node)
if _, ok := s.defvars[b.ID][var_]; !ok {
s.defvars[b.ID][var_] = v // treat FwdDefs as definitions.
}
v := s.fwdrefs[len(s.fwdrefs)-1]
s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
b := v.Block
- var_ := v.Aux.(*ir.Node)
+ var_ := v.Aux.(ir.Node)
if b == s.f.Entry {
// No variable should be live at entry.
s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
}
// lookupVarOutgoing finds the variable's value at the end of block b.
-func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ *ir.Node, line src.XPos) *ssa.Value {
+func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.Node, line src.XPos) *ssa.Value {
for {
if v := s.defvars[b.ID][var_]; v != nil {
return v
// A collection of global state used by liveness analysis.
type Liveness struct {
- fn *ir.Node
+ fn ir.Node
f *ssa.Func
- vars []*ir.Node
- idx map[*ir.Node]int32
+ vars []ir.Node
+ idx map[ir.Node]int32
stkptrsize int64
be []BlockEffects
// nor do we care about non-local variables,
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
-func livenessShouldTrack(n *ir.Node) bool {
+func livenessShouldTrack(n ir.Node) bool {
return n.Op() == ir.ONAME && (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers()
}
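As an illustrative sketch of the filter (names hypothetical, not from this CL):

    func f(p *int, n int) { // p: PPARAM with pointers -> tracked
        x := new(int)       // x: PAUTO with pointers  -> tracked
        i := n              // i: no pointers          -> not tracked
        *x = i
        _ = p
    }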
// getvariables returns the list of on-stack variables that we need to track
// and a map for looking up indices by ir.Node.
-func getvariables(fn *ir.Node) ([]*ir.Node, map[*ir.Node]int32) {
- var vars []*ir.Node
+func getvariables(fn ir.Node) ([]ir.Node, map[ir.Node]int32) {
+ var vars []ir.Node
for _, n := range fn.Func().Dcl {
if livenessShouldTrack(n) {
vars = append(vars, n)
}
}
- idx := make(map[*ir.Node]int32, len(vars))
+ idx := make(map[ir.Node]int32, len(vars))
for i, n := range vars {
idx[n] = int32(i)
}
}
// affectedNode returns the ir.Node affected by v
-func affectedNode(v *ssa.Value) (*ir.Node, ssa.SymEffect) {
+func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) {
// Special cases.
switch v.Op {
case ssa.OpLoadReg:
return n, ssa.SymWrite
case ssa.OpVarLive:
- return v.Aux.(*ir.Node), ssa.SymRead
+ return v.Aux.(ir.Node), ssa.SymRead
case ssa.OpVarDef, ssa.OpVarKill:
- return v.Aux.(*ir.Node), ssa.SymWrite
+ return v.Aux.(ir.Node), ssa.SymWrite
case ssa.OpKeepAlive:
n, _ := AutoVar(v.Args[0])
return n, ssa.SymRead
case nil, *obj.LSym:
// ok, but no node
return nil, e
- case *ir.Node:
+ case ir.Node:
return a, e
default:
base.Fatalf("weird aux: %s", v.LongString())
// Constructs a new liveness structure used to hold the global state of the
// liveness computation. The vars argument is the slice of variables to track,
// and idx maps each variable to its index in vars.
-func newliveness(fn *ir.Node, f *ssa.Func, vars []*ir.Node, idx map[*ir.Node]int32, stkptrsize int64) *Liveness {
+func newliveness(fn ir.Node, f *ssa.Func, vars []ir.Node, idx map[ir.Node]int32, stkptrsize int64) *Liveness {
lv := &Liveness{
fn: fn,
f: f,
// Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars
// argument is a slice of Nodes.
-func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Node, args, locals bvec) {
+func (lv *Liveness) pointerMap(liveout bvec, vars []ir.Node, args, locals bvec) {
for i := int32(0); ; i++ {
i = liveout.Next(i)
if i < 0 {
// Size args bitmaps to be just large enough to hold the largest pointer.
// First, find the largest Xoffset node we care about.
// (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
- var maxArgNode *ir.Node
+ var maxArgNode ir.Node
for _, n := range lv.vars {
switch n.Class() {
case ir.PPARAM, ir.PPARAMOUT:
return false
}
-func instrument(fn *ir.Node) {
+func instrument(fn ir.Node) {
if fn.Func().Pragma&ir.Norace != 0 {
return
}
)
// range
-func typecheckrange(n *ir.Node) {
+func typecheckrange(n ir.Node) {
// Typechecking order is important here:
// 0. first typecheck range expression (slice/map/chan),
// it is evaluated only once and so logically it is not part of the loop.
decldepth--
}
-func typecheckrangeExpr(n *ir.Node) {
+func typecheckrangeExpr(n ir.Node) {
n.SetRight(typecheck(n.Right(), ctxExpr))
t := n.Right().Type()
base.ErrorfAt(n.Pos(), "too many variables in range")
}
- var v1, v2 *ir.Node
+ var v1, v2 ir.Node
if n.List().Len() != 0 {
v1 = n.List().First()
}
// simpler forms. The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
-func walkrange(n *ir.Node) *ir.Node {
+func walkrange(n ir.Node) ir.Node {
if isMapClear(n) {
m := n.Right()
lno := setlineno(m)
lno := setlineno(a)
n.SetRight(nil)
- var v1, v2 *ir.Node
+ var v1, v2 ir.Node
l := n.List().Len()
if l > 0 {
v1 = n.List().First()
// to avoid erroneous processing by racewalk.
n.PtrList().Set(nil)
- var ifGuard *ir.Node
+ var ifGuard ir.Node
translatedLoopOp := ir.OFOR
- var body []*ir.Node
- var init []*ir.Node
+ var body []ir.Node
+ var init []ir.Node
switch t.Etype {
default:
base.Fatalf("walkrange")
// for v1 := range ha { body }
if v2 == nil {
- body = []*ir.Node{ir.Nod(ir.OAS, v1, hv1)}
+ body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
break
}
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(hv1, tmp)
- body = []*ir.Node{a}
+ body = []ir.Node{a}
break
}
if v1 == nil {
body = nil
} else if v2 == nil {
- body = []*ir.Node{ir.Nod(ir.OAS, v1, key)}
+ body = []ir.Node{ir.Nod(ir.OAS, v1, key)}
} else {
elem := nodSym(ir.ODOT, hit, elemsym)
elem = ir.Nod(ir.ODEREF, elem, nil)
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(key, elem)
- body = []*ir.Node{a}
+ body = []ir.Node{a}
}
case types.TCHAN:
if v1 == nil {
body = nil
} else {
- body = []*ir.Node{ir.Nod(ir.OAS, v1, hv1)}
+ body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
}
// Zero hv1. This prevents hv1 from being the sole, inaccessible
// reference to an otherwise GC-able value during the next channel receive.
// }
//
// where == for keys of map m is reflexive.
-func isMapClear(n *ir.Node) bool {
+func isMapClear(n ir.Node) bool {
if base.Flag.N != 0 || instrumenting {
return false
}
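The full pattern being matched, per the doc comment above, is the canonical map-clearing loop:

    for k := range m {
        delete(m, k)
    }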
}
// mapClear constructs a call to runtime.mapclear for the map m.
-func mapClear(m *ir.Node) *ir.Node {
+func mapClear(m ir.Node) ir.Node {
t := m.Type()
// instantiate mapclear(typ *type, hmap map[any]any)
// in which the evaluation of a is side-effect-free.
//
// Parameters are as in walkrange: "for v1, v2 = range a".
-func arrayClear(n, v1, v2, a *ir.Node) bool {
+func arrayClear(n, v1, v2, a ir.Node) bool {
if base.Flag.N != 0 || instrumenting {
return false
}
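The loop form arrayClear recognizes is, roughly (zero stands for any zero value of the element type):

    for i := range a {
        a[i] = zero
    }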
tmp = conv(tmp, types.Types[types.TUINTPTR])
n.PtrBody().Append(ir.Nod(ir.OAS, hn, tmp))
- var fn *ir.Node
+ var fn ir.Node
if a.Type().Elem().HasPointers() {
// memclrHasPointers(hp, hn)
Curfn.Func().SetWBPos(stmt.Pos())
}
// addptr returns (*T)(uintptr(p) + n).
-func addptr(p *ir.Node, n int64) *ir.Node {
+func addptr(p ir.Node, n int64) ir.Node {
t := p.Type()
p = ir.Nod(ir.OCONVNOP, p, nil)
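Written out in source terms, the node tree addptr builds corresponds to (sketch):

    // (*T)(uintptr(p) + n), i.e. in user-level Go:
    (*T)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + uintptr(n)))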
if receiver != nil {
inLen++
}
- in := make([]*ir.Node, 0, inLen)
+ in := make([]ir.Node, 0, inLen)
if receiver != nil {
d := anonfield(receiver)
}
outLen := f.Results().Fields().Len()
- out := make([]*ir.Node, 0, outLen)
+ out := make([]ir.Node, 0, outLen)
for _, t := range f.Results().Fields().Slice() {
d := anonfield(t.Type)
out = append(out, d)
return s
}
-func typename(t *types.Type) *ir.Node {
+func typename(t *types.Type) ir.Node {
s := typenamesym(t)
if s.Def == nil {
n := ir.NewNameAt(src.NoXPos, s)
return n
}
-func itabname(t, itype *types.Type) *ir.Node {
+func itabname(t, itype *types.Type) ir.Node {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
base.Fatalf("itabname(%v, %v)", t, itype)
}
}
}
-func addsignats(dcls []*ir.Node) {
+func addsignats(dcls []ir.Node) {
// copy types from dcl list to signatset
for _, n := range dcls {
if n.Op() == ir.OTYPE {
// The latter is the type of an auto-generated wrapper.
dtypesym(types.NewPtr(types.Errortype))
- dtypesym(functype(nil, []*ir.Node{anonfield(types.Errortype)}, []*ir.Node{anonfield(types.Types[types.TSTRING])}))
+ dtypesym(functype(nil, []ir.Node{anonfield(types.Errortype)}, []ir.Node{anonfield(types.Types[types.TSTRING])}))
// add paths for runtime and main, which 6l imports implicitly.
dimportpath(Runtimepkg)
// zeroaddr returns the address of a symbol with at least
// size bytes of zeros.
-func zeroaddr(size int64) *ir.Node {
+func zeroaddr(size int64) ir.Node {
if size >= 1<<31 {
base.Fatalf("map elem too big %d", size)
}
// when analyzing a set of mutually recursive functions.
type bottomUpVisitor struct {
- analyze func([]*ir.Node, bool)
+ analyze func([]ir.Node, bool)
visitgen uint32
- nodeID map[*ir.Node]uint32
- stack []*ir.Node
+ nodeID map[ir.Node]uint32
+ stack []ir.Node
}
// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
-func visitBottomUp(list []*ir.Node, analyze func(list []*ir.Node, recursive bool)) {
+func visitBottomUp(list []ir.Node, analyze func(list []ir.Node, recursive bool)) {
var v bottomUpVisitor
v.analyze = analyze
- v.nodeID = make(map[*ir.Node]uint32)
+ v.nodeID = make(map[ir.Node]uint32)
for _, n := range list {
if n.Op() == ir.ODCLFUNC && !n.Func().IsHiddenClosure() {
v.visit(n)
}
}
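A hedged usage sketch (the analyze body is illustrative): callers pass the package's top-level declarations and receive one callback per strongly connected component, bottom-up.

    visitBottomUp(xtop, func(list []ir.Node, recursive bool) {
        // list is one SCC of the static call graph; recursive reports
        // whether functions in list can (indirectly) call themselves.
        for _, fn := range list {
            _ = fn // analyze fn here
        }
    })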
-func (v *bottomUpVisitor) visit(n *ir.Node) uint32 {
+func (v *bottomUpVisitor) visit(n ir.Node) uint32 {
if id := v.nodeID[n]; id > 0 {
// already visited
return id
min := v.visitgen
v.stack = append(v.stack, n)
- ir.InspectList(n.Body(), func(n *ir.Node) bool {
+ ir.InspectList(n.Body(), func(n ir.Node) bool {
switch n.Op() {
case ir.ONAME:
if n.Class() == ir.PFUNC {
return marks[i-1].Scope
}
-func assembleScopes(fnsym *obj.LSym, fn *ir.Node, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
+func assembleScopes(fnsym *obj.LSym, fn ir.Node, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
// Initialize the DWARF scope tree based on lexical scopes.
dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func().Parents))
for i, parent := range fn.Func().Parents {
)
// select
-func typecheckselect(sel *ir.Node) {
- var def *ir.Node
+func typecheckselect(sel ir.Node) {
+ var def ir.Node
lno := setlineno(sel)
typecheckslice(sel.Init().Slice(), ctxStmt)
for _, ncase := range sel.List().Slice() {
base.Pos = lno
}
-func walkselect(sel *ir.Node) {
+func walkselect(sel ir.Node) {
lno := setlineno(sel)
if sel.Body().Len() != 0 {
base.Fatalf("double walkselect")
base.Pos = lno
}
-func walkselectcases(cases *ir.Nodes) []*ir.Node {
+func walkselectcases(cases *ir.Nodes) []ir.Node {
ncas := cases.Len()
sellineno := base.Pos
// optimization: zero-case select
if ncas == 0 {
- return []*ir.Node{mkcall("block", nil, nil)}
+ return []ir.Node{mkcall("block", nil, nil)}
}
// optimization: one-case select: single op.
// convert case value arguments to addresses.
// this rewrite is used by both the general code and the next optimization.
- var dflt *ir.Node
+ var dflt ir.Node
for _, cas := range cases.Slice() {
setlineno(cas)
n := cas.Left()
r.SetLeft(typecheck(r.Left(), ctxExpr))
r.PtrBody().Set(cas.Body().Slice())
r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...))
- return []*ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)}
+ return []ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)}
}
if dflt != nil {
ncas--
}
- casorder := make([]*ir.Node, ncas)
+ casorder := make([]ir.Node, ncas)
nsends, nrecvs := 0, 0
- var init []*ir.Node
+ var init []ir.Node
// generate sel-struct
base.Pos = sellineno
// No initialization for order; runtime.selectgo is responsible for that.
order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
- var pc0, pcs *ir.Node
+ var pc0, pcs ir.Node
if base.Flag.Race {
pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
pc0 = typecheck(ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, pcs, nodintconst(0)), nil), ctxExpr)
}
var i int
- var c, elem *ir.Node
+ var c, elem ir.Node
switch n.Op() {
default:
base.Fatalf("select %v", n.Op())
casorder[i] = cas
- setField := func(f string, val *ir.Node) {
+ setField := func(f string, val ir.Node) {
r := ir.Nod(ir.OAS, nodSym(ir.ODOT, ir.Nod(ir.OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
r = typecheck(r, ctxStmt)
init = append(init, r)
}
// dispatch cases
- dispatch := func(cond, cas *ir.Node) {
+ dispatch := func(cond, cas ir.Node) {
cond = typecheck(cond, ctxExpr)
cond = defaultlit(cond, nil)
}
// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
-func bytePtrToIndex(n *ir.Node, i int64) *ir.Node {
+func bytePtrToIndex(n ir.Node, i int64) ir.Node {
s := ir.Nod(ir.OADDR, ir.Nod(ir.OINDEX, n, nodintconst(i)), nil)
t := types.NewPtr(types.Types[types.TUINT8])
return convnop(s, t)
// Keep in sync with src/runtime/select.go.
func scasetype() *types.Type {
if scase == nil {
- scase = tostruct([]*ir.Node{
+ scase = tostruct([]ir.Node{
namedfield("c", types.Types[types.TUNSAFEPTR]),
namedfield("elem", types.Types[types.TUNSAFEPTR]),
})
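For reference, the runtime definition this mirrors is, as of this change, just the two pointers (see src/runtime/select.go):

    type scase struct {
        c    *hchan         // chan
        elem unsafe.Pointer // data element
    }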
)
type InitEntry struct {
- Xoffset int64 // struct, array only
- Expr *ir.Node // bytes of run-time computed expressions
+ Xoffset int64 // struct, array only
+ Expr ir.Node // bytes of run-time computed expressions
}
type InitPlan struct {
type InitSchedule struct {
// out is the ordered list of dynamic initialization
// statements.
- out []*ir.Node
+ out []ir.Node
- initplans map[*ir.Node]*InitPlan
- inittemps map[*ir.Node]*ir.Node
+ initplans map[ir.Node]*InitPlan
+ inittemps map[ir.Node]ir.Node
}
-func (s *InitSchedule) append(n *ir.Node) {
+func (s *InitSchedule) append(n ir.Node) {
s.out = append(s.out, n)
}
// staticInit adds an initialization statement n to the schedule.
-func (s *InitSchedule) staticInit(n *ir.Node) {
+func (s *InitSchedule) staticInit(n ir.Node) {
if !s.tryStaticInit(n) {
if base.Flag.Percent != 0 {
ir.Dump("nonstatic", n)
// tryStaticInit attempts to statically execute an initialization
// statement and reports whether it succeeded.
-func (s *InitSchedule) tryStaticInit(n *ir.Node) bool {
+func (s *InitSchedule) tryStaticInit(n ir.Node) bool {
// Only worry about simple "l = r" assignments. Multiple
// variable/expression OAS2 assignments have already been
// replaced by multiple simple OAS assignments, and the other
// like staticassign but we are copying an already
// initialized value r.
-func (s *InitSchedule) staticcopy(l *ir.Node, r *ir.Node) bool {
+func (s *InitSchedule) staticcopy(l ir.Node, r ir.Node) bool {
if r.Op() != ir.ONAME && r.Op() != ir.OMETHEXPR {
return false
}
return false
}
-func (s *InitSchedule) staticassign(l *ir.Node, r *ir.Node) bool {
+func (s *InitSchedule) staticassign(l ir.Node, r ir.Node) bool {
for r.Op() == ir.OCONVNOP {
r = r.Left()
}
markTypeUsedInInterface(val.Type(), l.Sym().Linksym())
- var itab *ir.Node
+ var itab ir.Node
if l.Type().IsEmptyInterface() {
itab = typename(val.Type())
} else {
// staticname returns a name backed by a (writable) static data symbol.
// Use readonlystaticname for read-only node.
-func staticname(t *types.Type) *ir.Node {
+func staticname(t *types.Type) ir.Node {
// Don't use lookupN; it interns the resulting string, but these are all unique.
n := NewName(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
statuniqgen++
}
// readonlystaticname returns a name backed by a read-only static data symbol.
-func readonlystaticname(t *types.Type) *ir.Node {
+func readonlystaticname(t *types.Type) ir.Node {
n := staticname(t)
n.MarkReadonly()
n.Sym().Linksym().Set(obj.AttrContentAddressable, true)
return n
}
-func isSimpleName(n *ir.Node) bool {
+func isSimpleName(n ir.Node) bool {
return (n.Op() == ir.ONAME || n.Op() == ir.OMETHEXPR) && n.Class() != ir.PAUTOHEAP && n.Class() != ir.PEXTERN
}
-func litas(l *ir.Node, r *ir.Node, init *ir.Nodes) {
+func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
a := ir.Nod(ir.OAS, l, r)
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
// getdyn calculates the initGenType for n.
// If top is false, getdyn is recursing.
-func getdyn(n *ir.Node, top bool) initGenType {
+func getdyn(n ir.Node, top bool) initGenType {
switch n.Op() {
default:
if isGoConst(n) {
}
// isStaticCompositeLiteral reports whether n is a compile-time constant.
-func isStaticCompositeLiteral(n *ir.Node) bool {
+func isStaticCompositeLiteral(n ir.Node) bool {
switch n.Op() {
case ir.OSLICELIT:
return false
// fixedlit handles struct, array, and slice literals.
// TODO: expand documentation.
-func fixedlit(ctxt initContext, kind initKind, n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
+func fixedlit(ctxt initContext, kind initKind, n ir.Node, var_ ir.Node, init *ir.Nodes) {
isBlank := var_ == ir.BlankNode
- var splitnode func(*ir.Node) (a *ir.Node, value *ir.Node)
+ var splitnode func(ir.Node) (a ir.Node, value ir.Node)
switch n.Op() {
case ir.OARRAYLIT, ir.OSLICELIT:
var k int64
- splitnode = func(r *ir.Node) (*ir.Node, *ir.Node) {
+ splitnode = func(r ir.Node) (ir.Node, ir.Node) {
if r.Op() == ir.OKEY {
k = indexconst(r.Left())
if k < 0 {
return a, r
}
case ir.OSTRUCTLIT:
- splitnode = func(r *ir.Node) (*ir.Node, *ir.Node) {
+ splitnode = func(r ir.Node) (ir.Node, ir.Node) {
if r.Op() != ir.OSTRUCTKEY {
base.Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r)
}
case initKindStatic:
genAsStatic(a)
case initKindDynamic, initKindLocalCode:
- a = orderStmtInPlace(a, map[string][]*ir.Node{})
+ a = orderStmtInPlace(a, map[string][]ir.Node{})
a = walkstmt(a)
init.Append(a)
default:
}
}
-func isSmallSliceLit(n *ir.Node) bool {
+func isSmallSliceLit(n ir.Node) bool {
if n.Op() != ir.OSLICELIT {
return false
}
return smallintconst(r) && (n.Type().Elem().Width == 0 || r.Int64Val() <= smallArrayBytes/n.Type().Elem().Width)
}
-func slicelit(ctxt initContext, n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
+func slicelit(ctxt initContext, n ir.Node, var_ ir.Node, init *ir.Nodes) {
// make an array type corresponding to the number of elements we have
t := types.NewArray(n.Type().Elem(), n.Right().Int64Val())
dowidth(t)
// if the literal contains constants,
// make static initialized array (1),(2)
- var vstat *ir.Node
+ var vstat ir.Node
mode := getdyn(n, true)
if mode&initConst != 0 && !isSmallSliceLit(n) {
vauto := temp(types.NewPtr(t))
// set auto to point at new temp or heap (3 assign)
- var a *ir.Node
+ var a ir.Node
if x := prealloc[n]; x != nil {
// temp allocated during order.go for dddarg
if !types.Identical(t, x.Type()) {
a = ir.Nod(ir.OAS, a, value)
a = typecheck(a, ctxStmt)
- a = orderStmtInPlace(a, map[string][]*ir.Node{})
+ a = orderStmtInPlace(a, map[string][]ir.Node{})
a = walkstmt(a)
init.Append(a)
}
a = ir.Nod(ir.OAS, var_, ir.Nod(ir.OSLICE, vauto, nil))
a = typecheck(a, ctxStmt)
- a = orderStmtInPlace(a, map[string][]*ir.Node{})
+ a = orderStmtInPlace(a, map[string][]ir.Node{})
a = walkstmt(a)
init.Append(a)
}
-func maplit(n *ir.Node, m *ir.Node, init *ir.Nodes) {
+func maplit(n ir.Node, m ir.Node, init *ir.Nodes) {
// make the map var
a := ir.Nod(ir.OMAKE, nil, nil)
a.SetEsc(n.Esc())
init.Append(a)
}
-func anylit(n *ir.Node, var_ *ir.Node, init *ir.Nodes) {
+func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
t := n.Type()
switch n.Op() {
default:
base.Fatalf("anylit: not ptr")
}
- var r *ir.Node
+ var r ir.Node
if n.Right() != nil {
// n.Right is a stack temporary used as backing store.
init.Append(ir.Nod(ir.OAS, n.Right(), nil)) // zero backing store, just in case (#18410)
}
}
-func oaslit(n *ir.Node, init *ir.Nodes) bool {
+func oaslit(n ir.Node, init *ir.Nodes) bool {
if n.Left() == nil || n.Right() == nil {
// not a special composite literal assignment
return false
return true
}
-func getlit(lit *ir.Node) int {
+func getlit(lit ir.Node) int {
if smallintconst(lit) {
return int(lit.Int64Val())
}
}
// stataddr returns the static address of n, if n has one, or else nil.
-func stataddr(n *ir.Node) *ir.Node {
+func stataddr(n ir.Node) ir.Node {
if n == nil {
return nil
}
return nil
}
-func (s *InitSchedule) initplan(n *ir.Node) {
+func (s *InitSchedule) initplan(n ir.Node) {
if s.initplans[n] != nil {
return
}
}
}
-func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n *ir.Node) {
+func (s *InitSchedule) addvalue(p *InitPlan, xoffset int64, n ir.Node) {
// special case: zero can be dropped entirely
if isZero(n) {
return
p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n})
}
-func isZero(n *ir.Node) bool {
+func isZero(n ir.Node) bool {
switch n.Op() {
case ir.ONIL:
return true
return false
}
-func isvaluelit(n *ir.Node) bool {
+func isvaluelit(n ir.Node) bool {
return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
}
-func genAsStatic(as *ir.Node) {
+func genAsStatic(as ir.Node) {
if as.Left().Type() == nil {
base.Fatalf("genAsStatic as.Left not typechecked")
}
const maxOpenDefers = 8
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
-var ssaDumpInlined []*ir.Node
+var ssaDumpInlined []ir.Node
func initssaconfig() {
types_ := ssa.NewTypes()
// function/method/interface call), where the receiver of a method call is
// considered as the 0th parameter. This does not include the receiver of an
// interface call.
-func getParam(n *ir.Node, i int) *types.Field {
+func getParam(n ir.Node, i int) *types.Field {
t := n.Left().Type()
if n.Op() == ir.OCALLMETH {
if i == 0 {
// buildssa builds an SSA function for fn.
// worker indicates which of the backend workers is doing the processing.
-func buildssa(fn *ir.Node, worker int) *ssa.Func {
+func buildssa(fn ir.Node, worker int) *ssa.Func {
name := ir.FuncName(fn)
printssa := false
if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
// Allocate starting values
s.labels = map[string]*ssaLabel{}
- s.labeledNodes = map[*ir.Node]*ssaLabel{}
- s.fwdVars = map[*ir.Node]*ssa.Value{}
+ s.labeledNodes = map[ir.Node]*ssaLabel{}
+ s.fwdVars = map[ir.Node]*ssa.Value{}
s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.Func().OpenCodedDeferDisallowed()
}
// Generate addresses of local declarations
- s.decladdrs = map[*ir.Node]*ssa.Value{}
+ s.decladdrs = map[ir.Node]*ssa.Value{}
var args []ssa.Param
var results []ssa.Param
for _, n := range fn.Func().Dcl {
return s.f
}
-func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Node) {
+func dumpSourcesColumn(writer *ssa.HTMLWriter, fn ir.Node) {
// Read sources of target function fn.
fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Func().Endlineno.Line())
// Information about each open-coded defer.
type openDeferInfo struct {
// The ODEFER node representing the function call of the defer
- n *ir.Node
+ n ir.Node
// If defer call is closure call, the address of the argtmp where the
// closure is stored.
closure *ssa.Value
// The node representing the argtmp where the closure is stored - used for
// function, method, or interface call, to store a closure that panic
// processing can use for this defer.
- closureNode *ir.Node
+ closureNode ir.Node
// If defer call is interface call, the address of the argtmp where the
// receiver is stored
rcvr *ssa.Value
// The node representing the argtmp where the receiver is stored
- rcvrNode *ir.Node
+ rcvrNode ir.Node
// The addresses of the argtmps where the evaluated arguments of the defer
// function call are stored.
argVals []*ssa.Value
// The nodes representing the argtmps where the args of the defer are stored
- argNodes []*ir.Node
+ argNodes []ir.Node
}
type state struct {
f *ssa.Func
// Node for function
- curfn *ir.Node
+ curfn ir.Node
// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
labels map[string]*ssaLabel
- labeledNodes map[*ir.Node]*ssaLabel
+ labeledNodes map[ir.Node]*ssaLabel
// unlabeled break and continue statement tracking
breakTo *ssa.Block // current target for plain break statement
// variable assignments in the current block (map from variable symbol to ssa value)
// The ir.Node is the unique identifier (an ONAME node) for the variable.
// TODO: keep a single varnum map, then make all of these maps slices instead?
- vars map[*ir.Node]*ssa.Value
+ vars map[ir.Node]*ssa.Value
// fwdVars are variables that are used before they are defined in the current block.
// This map exists just to coalesce multiple references into a single FwdRef op.
// The ir.Node is the unique identifier (an ONAME node) for the variable.
- fwdVars map[*ir.Node]*ssa.Value
+ fwdVars map[ir.Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID.
- defvars []map[*ir.Node]*ssa.Value
+ defvars []map[ir.Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables.
- decladdrs map[*ir.Node]*ssa.Value
+ decladdrs map[ir.Node]*ssa.Value
// starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
sb *ssa.Value
// value representing address of where deferBits autotmp is stored
deferBitsAddr *ssa.Value
- deferBitsTemp *ir.Node
+ deferBitsTemp ir.Node
// line number stack. The current line number is top of stack
line []src.XPos
panics map[funcLine]*ssa.Block
// list of PPARAMOUT (return) variables.
- returns []*ir.Node
+ returns []ir.Node
cgoUnsafeArgs bool
hasdefer bool // whether the function contains a defer statement
func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
-func ssaMarker(name string) *ir.Node {
+func ssaMarker(name string) ir.Node {
return NewName(&types.Sym{Name: name})
}
s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
}
s.curBlock = b
- s.vars = map[*ir.Node]*ssa.Value{}
+ s.vars = map[ir.Node]*ssa.Value{}
for n := range s.fwdVars {
delete(s.fwdVars, n)
}
}
// stmt converts the statement n to SSA and adds it to s.
-func (s *state) stmt(n *ir.Node) {
+func (s *state) stmt(n ir.Node) {
if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
s.pushLine(n.Pos())
}
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
-func (s *state) expr(n *ir.Node) *ssa.Value {
+func (s *state) expr(n ir.Node) *ssa.Value {
if hasUniquePos(n) {
// ONAMEs and named OLITERALs have the line number
// of the decl, not the use. See issue 14742.
// If inplace is true, it writes the result of the OAPPEND expression n
// back to the slice being appended to, and returns nil.
// inplace MUST be set to false if the slice can be SSA'd.
-func (s *state) append(n *ir.Node, inplace bool) *ssa.Value {
+func (s *state) append(n ir.Node, inplace bool) *ssa.Value {
// If inplace is false, process as expression "append(s, e1, e2, e3)":
//
// ptr, len, cap := s
// if cond is true and no if cond is false.
// This function is intended to handle && and || better than just calling
// s.expr(cond) and branching on the result.
-func (s *state) condBranch(cond *ir.Node, yes, no *ssa.Block, likely int8) {
+func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
switch cond.Op() {
case ir.OANDAND:
mid := s.f.NewBlock(ssa.BlockPlain)
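Schematically, for cond = a && b the lowering is (a control-flow sketch, not the literal code):

    //  s.condBranch(a, mid, no, ...)  // a false => whole condition false
    //  s.startBlock(mid)
    //  s.condBranch(b, yes, no, ...)  // a true  => result is b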
// If deref is true, then we do left = *right instead (and right has already been nil-checked).
// If deref is true and right == nil, just do left = 0.
// skip indicates assignments (at the top level) that can be avoided.
-func (s *state) assign(left *ir.Node, right *ssa.Value, deref bool, skip skipMask) {
+func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
if left.Op() == ir.ONAME && ir.IsBlank(left) {
return
}
// An intrinsicBuilder converts a call node n into an ssa value that
// implements that call as an intrinsic. args is a list of arguments to the func.
-type intrinsicBuilder func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value
+type intrinsicBuilder func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value
type intrinsicKey struct {
arch *sys.Arch
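The key continues past the excerpt; reconstructed here for reference, it is the (architecture, package, function) triple:

    type intrinsicKey struct {
        arch *sys.Arch
        pkg  string
        fn   string
    }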
/******** runtime ********/
if !instrumenting {
add("runtime", "slicebytetostringtmp",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
// Compiler frontend optimizations emit OBYTES2STRTMP nodes
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
all...)
}
addF("runtime/internal/math", "MulUintptr",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
}
},
sys.AMD64, sys.I386, sys.MIPS64)
add("runtime", "KeepAlive",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
return nil
},
all...)
add("runtime", "getclosureptr",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallerpc",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
},
all...)
add("runtime", "getcallersp",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
},
all...)
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Ctz64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
addF("runtime/internal/sys", "Bswap32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
addF("runtime/internal/sys", "Bswap64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
/******** runtime/internal/atomic ********/
addF("runtime/internal/atomic", "Load",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Load64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "LoadAcq64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.PPC64)
addF("runtime/internal/atomic", "Loadp",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Store64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StorepNoWB",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "StoreRel64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.PPC64)
addF("runtime/internal/atomic", "Xchg",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xchg64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
- type atomicOpEmitter func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType)
+ type atomicOpEmitter func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType)
makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.EType, emit atomicOpEmitter) intrinsicBuilder {
- return func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
// The target's atomics feature is detected dynamically
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
v := s.load(types.Types[types.TBOOL], addr)
}
}
- atomicXchgXaddEmitterARM64 := func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ atomicXchgXaddEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
sys.ARM64)
addF("runtime/internal/atomic", "Xadd",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Xadd64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
sys.ARM64)
addF("runtime/internal/atomic", "Cas",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "Cas64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
addF("runtime/internal/atomic", "CasRel",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
},
sys.PPC64)
- atomicCasEmitterARM64 := func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ atomicCasEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
sys.ARM64)
addF("runtime/internal/atomic", "And8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "And",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
addF("runtime/internal/atomic", "Or",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
return nil
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
- atomicAndOrEmitterARM64 := func(s *state, n *ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ atomicAndOrEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
}
/******** math ********/
addF("math", "Sqrt",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
},
sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
addF("math", "Trunc",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Ceil",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Floor",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
addF("math", "Round",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "RoundToEven",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.S390X, sys.Wasm)
addF("math", "Abs",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
},
sys.ARM64, sys.ARM, sys.PPC64, sys.Wasm)
addF("math", "Copysign",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
},
sys.PPC64, sys.Wasm)
addF("math", "FMA",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
},
sys.ARM64, sys.PPC64, sys.S390X)
addF("math", "FMA",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[types.TFLOAT64])
},
sys.AMD64)
addF("math", "FMA",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[types.TFLOAT64])
},
sys.ARM)
- makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
- return func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ makeRoundAMD64 := func(op ssa.Op) func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasSSE41)
b := s.endBlock()
b.Kind = ssa.BlockIf
/******** math/bits ********/
addF("math/bits", "TrailingZeros64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
c := s.constInt32(types.Types[types.TUINT32], 1<<16)
y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
},
sys.MIPS)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
c := s.constInt64(types.Types[types.TUINT64], 1<<16)
y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
},
sys.S390X, sys.PPC64)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
c := s.constInt32(types.Types[types.TUINT32], 1<<8)
y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
},
sys.MIPS)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
addF("math/bits", "TrailingZeros8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
c := s.constInt64(types.Types[types.TUINT64], 1<<8)
y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
// ReverseBytes inlines correctly, no need to intrinsify it.
// ReverseBytes16 lowers to a rotate, no need for anything special here.
addF("math/bits", "Len64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
},
sys.AMD64, sys.ARM64)
addF("math/bits", "Len32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
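// On 32-bit targets the operand is already word-sized, so the
// 32-bit bit-length op applies directly.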
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
},
sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
},
sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
addF("math/bits", "Len8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
},
sys.AMD64)
addF("math/bits", "Len",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
}
sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
// LeadingZeros is handled because it trivially calls Len.
addF("math/bits", "Reverse64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
},
sys.ARM64)
addF("math/bits", "Reverse",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
if s.config.PtrSize == 4 {
return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
}
},
sys.ARM64)
addF("math/bits", "RotateLeft8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
},
sys.AMD64)
addF("math/bits", "RotateLeft32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
},
sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "RotateLeft64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
- makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
- return func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
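// Branch at run time on POPCNT support: the hardware
// population count is used when available, with a call to the
// generic implementation as the fallback.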
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], x86HasPOPCNT)
b := s.endBlock()
b.Kind = ssa.BlockIf
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
sys.AMD64)
addF("math/bits", "OnesCount64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "OnesCount32",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
},
sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
sys.AMD64)
addF("math/bits", "OnesCount16",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
},
sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
addF("math/bits", "OnesCount8",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
},
sys.S390X, sys.PPC64, sys.Wasm)
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
sys.AMD64)
addF("math/bits", "Mul64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64)
alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE)
addF("math/bits", "Add64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X)
addF("math/bits", "Sub64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
},
sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
addF("math/bits", "Div64",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
// check for divide-by-zero/overflow and panic with appropriate message
cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
s.check(cmpZero, panicdivide)
/******** math/big ********/
add("math/big", "mulWW",
- func(s *state, n *ir.Node, args []*ssa.Value) *ssa.Value {
+ func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
},
sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
}
-func isIntrinsicCall(n *ir.Node) bool {
+func isIntrinsicCall(n ir.Node) bool {
if n == nil || n.Left() == nil {
return false
}
}
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
-func (s *state) intrinsicCall(n *ir.Node) *ssa.Value {
+func (s *state) intrinsicCall(n ir.Node) *ssa.Value {
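// Look up the builder registered for the callee's symbol and
// apply it to the evaluated arguments.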
v := findIntrinsic(n.Left().Sym())(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
}
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
-func (s *state) intrinsicArgs(n *ir.Node) []*ssa.Value {
+func (s *state) intrinsicArgs(n ir.Node) []*ssa.Value {
// Construct map of temps; see comments in s.call about the structure of n.
- temps := map[*ir.Node]*ssa.Value{}
+ temps := map[ir.Node]*ssa.Value{}
for _, a := range n.List().Slice() {
if a.Op() != ir.OAS {
s.Fatalf("non-assignment as a temp function argument %v", a.Op())
// call. We will also record funcdata information on where the args are stored
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
-func (s *state) openDeferRecord(n *ir.Node) {
+func (s *state) openDeferRecord(n ir.Node) {
// Do any needed expression evaluation for the args (including the
// receiver, if any). This may be evaluating something like 'autotmp_3 =
// once.mutex'. Such a statement will create a mapping in s.vars[] from
s.stmtList(n.List())
var args []*ssa.Value
- var argNodes []*ir.Node
+ var argNodes []ir.Node
opendefer := &openDeferInfo{
n: n,
// call the function directly if it is a static function.
closureVal := s.expr(fn)
closure := s.openDeferSave(nil, fn.Type(), closureVal)
- opendefer.closureNode = closure.Aux.(*ir.Node)
+ opendefer.closureNode = closure.Aux.(ir.Node)
if !(fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC) {
opendefer.closure = closure
}
// runtime panic code to use. But in the defer exit code, we will
// call the method directly.
closure := s.openDeferSave(nil, fn.Type(), closureVal)
- opendefer.closureNode = closure.Aux.(*ir.Node)
+ opendefer.closureNode = closure.Aux.(ir.Node)
} else {
if fn.Op() != ir.ODOTINTER {
base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
// Important to get the receiver type correct, so it is recognized
// as a pointer for GC purposes.
opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
- opendefer.closureNode = opendefer.closure.Aux.(*ir.Node)
- opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Node)
+ opendefer.closureNode = opendefer.closure.Aux.(ir.Node)
+ opendefer.rcvrNode = opendefer.rcvr.Aux.(ir.Node)
}
for _, argn := range n.Rlist().Slice() {
var v *ssa.Value
v = s.openDeferSave(argn, argn.Type(), nil)
}
args = append(args, v)
- argNodes = append(argNodes, v.Aux.(*ir.Node))
+ argNodes = append(argNodes, v.Aux.(ir.Node))
}
opendefer.argVals = args
opendefer.argNodes = argNodes
// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
// evaluated (via s.addr() below) to get the value that is to be stored. The
// function returns an SSA value representing a pointer to the autotmp location.
-func (s *state) openDeferSave(n *ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
+func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
canSSA := canSSAType(t)
var pos src.XPos
if canSSA {
}
}
-func (s *state) callResult(n *ir.Node, k callKind) *ssa.Value {
+func (s *state) callResult(n ir.Node, k callKind) *ssa.Value {
return s.call(n, k, false)
}
-func (s *state) callAddr(n *ir.Node, k callKind) *ssa.Value {
+func (s *state) callAddr(n ir.Node, k callKind) *ssa.Value {
return s.call(n, k, true)
}
// Calls the function n using the specified call type.
// Returns the address of the return value (or nil if none).
-func (s *state) call(n *ir.Node, k callKind, returnResultAddr bool) *ssa.Value {
+func (s *state) call(n ir.Node, k callKind, returnResultAddr bool) *ssa.Value {
s.prevCall = nil
var sym *types.Sym // target symbol (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
}
// getMethodClosure returns a value representing the closure for a method call
-func (s *state) getMethodClosure(fn *ir.Node) *ssa.Value {
+func (s *state) getMethodClosure(fn ir.Node) *ssa.Value {
// Make a name n2 for the function.
// fn.Sym might be sync.(*Mutex).Unlock.
// Make a PFUNC node out of that, then evaluate it.
// getClosureAndRcvr returns values for the appropriate closure and receiver of an
// interface call
-func (s *state) getClosureAndRcvr(fn *ir.Node) (*ssa.Value, *ssa.Value) {
+func (s *state) getClosureAndRcvr(fn ir.Node) (*ssa.Value, *ssa.Value) {
i := s.expr(fn.Left())
itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
s.nilCheck(itab)
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
// The value that the returned Value represents is guaranteed to be non-nil.
-func (s *state) addr(n *ir.Node) *ssa.Value {
+func (s *state) addr(n ir.Node) *ssa.Value {
if n.Op() != ir.ONAME {
s.pushLine(n.Pos())
defer s.popLine()
// canSSA reports whether n is SSA-able.
// n must be an ONAME (or an ODOT sequence with an ONAME base).
-func (s *state) canSSA(n *ir.Node) bool {
+func (s *state) canSSA(n ir.Node) bool {
if base.Flag.N != 0 {
return false
}
}
// exprPtr evaluates n to a pointer and nil-checks it.
-func (s *state) exprPtr(n *ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
+func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
p := s.expr(n)
if bounded || n.NonNil() {
if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
s.startBlock(bNext)
}
-func (s *state) intDivide(n *ir.Node, a, b *ssa.Value) *ssa.Value {
+func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
needcheck := true
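// A divisor that is a provably nonzero constant needs no
// run-time divide-by-zero check.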
switch b.Op {
case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call.
// If forLateExpandedCall is true, it returns the argument value to pass to the call operation.
// If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil.
-func (s *state) putArg(n *ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
+func (s *state) putArg(n ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
var a *ssa.Value
if forLateExpandedCall {
if !canSSAType(t) {
return ssa.Param{Type: t, Offset: int32(off)}, a
}
-func (s *state) storeArgWithBase(n *ir.Node, t *types.Type, base *ssa.Value, off int64) {
+func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
pt := types.NewPtr(t)
var addr *ssa.Value
if base == s.sp {
one: (*state).constInt64,
}
-func (s *state) uint64Tofloat64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
}
-func (s *state) uint64Tofloat32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
}
-func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = (floatY) x
// } else {
cvtF2F: ssa.OpCvt64Fto32F,
}
-func (s *state) uint32Tofloat64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
}
-func (s *state) uint32Tofloat32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
}
-func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// if x >= 0 {
// result = floatY(x)
// } else {
}
// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
-func (s *state) referenceTypeBuiltin(n *ir.Node, x *ssa.Value) *ssa.Value {
+func (s *state) referenceTypeBuiltin(n ir.Node, x *ssa.Value) *ssa.Value {
if !n.Left().Type().IsMap() && !n.Left().Type().IsChan() {
s.Fatalf("node must be a map or a channel")
}
cutoff: 1 << 31,
}
-func (s *state) float32ToUint64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u64, n, x, ft, tt)
}
-func (s *state) float64ToUint64(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u64, n, x, ft, tt)
}
-func (s *state) float32ToUint32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f32_u32, n, x, ft, tt)
}
-func (s *state) float64ToUint32(n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.floatToUint(&f64_u32, n, x, ft, tt)
}
-func (s *state) floatToUint(cvttab *f2uCvtTab, n *ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// cutoff:=1<<(intY_Size-1)
// if x < floatX(cutoff) {
// result = uintY(x)
// dottype generates SSA for a type assertion node.
// commaok indicates whether to panic or return a bool.
// If commaok is false, resok will be nil.
-func (s *state) dottype(n *ir.Node, commaok bool) (res, resok *ssa.Value) {
+func (s *state) dottype(n ir.Node, commaok bool) (res, resok *ssa.Value) {
iface := s.expr(n.Left()) // input interface
target := s.expr(n.Right()) // target type
byteptr := s.f.Config.Types.BytePtr
targetITab = s.expr(n.List().First())
}
- var tmp *ir.Node // temporary for use with large types
+ var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
if commaok && !canSSAType(n.Type()) {
// unSSAable type, use temporary.
}
// variable returns the value of a variable at the current location.
-func (s *state) variable(name *ir.Node, t *types.Type) *ssa.Value {
+func (s *state) variable(name ir.Node, t *types.Type) *ssa.Value {
v := s.vars[name]
if v != nil {
return v
return s.variable(memVar, types.TypeMem)
}
-func (s *state) addNamedValue(n *ir.Node, v *ssa.Value) {
+func (s *state) addNamedValue(n ir.Node, v *ssa.Value) {
if n.Class() == ir.Pxxx {
// Don't track our marker nodes (memVar etc.).
return
bstart []*obj.Prog
// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8.
- ScratchFpMem *ir.Node
+ ScratchFpMem ir.Node
maxarg int64 // largest frame size for arguments to calls made by the function
}
- // byXoffset implements sort.Interface for []*Node using Xoffset as the ordering.
+ // byXoffset implements sort.Interface for []Node using Xoffset as the ordering.
-type byXoffset []*ir.Node
+type byXoffset []ir.Node
func (s byXoffset) Len() int { return len(s) }
func (s byXoffset) Less(i, j int) bool { return s[i].Offset() < s[j].Offset() }
func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func emitStackObjects(e *ssafn, pp *Progs) {
- var vars []*ir.Node
+ var vars []ir.Node
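// Collect the address-taken locals that liveness tracks; these
// need stack object records so the garbage collector can find
// them through their addresses.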
for _, n := range e.curfn.Func().Dcl {
if livenessShouldTrack(n) && n.Name().Addrtaken() {
vars = append(vars, n)
case *obj.LSym:
a.Name = obj.NAME_EXTERN
a.Sym = n
- case *ir.Node:
+ case ir.Node:
if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
a.Sym = n.Orig().Sym().Linksym()
- // AutoVar returns a *Node and int64 representing the auto variable and offset within it
+ // AutoVar returns a Node and int64 representing the auto variable and offset within it
// where v should be spilled.
-func AutoVar(v *ssa.Value) (*ir.Node, int64) {
+func AutoVar(v *ssa.Value) (ir.Node, int64) {
loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
if v.Type.Size() > loc.Type.Size() {
v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
}
// fieldIdx finds the index of the field referred to by the ODOT node n.
-func fieldIdx(n *ir.Node) int {
+func fieldIdx(n ir.Node) int {
t := n.Left().Type()
f := n.Sym()
if !t.IsStruct() {
// ssafn holds frontend information about a function that the backend is processing.
// It also exports a bunch of compiler services for the ssa backend.
type ssafn struct {
- curfn *ir.Node
+ curfn ir.Node
strings map[string]*obj.LSym // map from constant string to data symbols
- scratchFpMem *ir.Node // temp for floating point register / memory moves on some architectures
+ scratchFpMem ir.Node // temp for floating point register / memory moves on some architectures
stksize int64 // stack size for current frame
stkptrsize int64 // prefix of stack containing pointers
log bool // print ssa debug to the stdout
return data
}
-func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Node {
+func (e *ssafn) Auto(pos src.XPos, t *types.Type) ir.Node {
n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
return n
}
return base.Ctxt.Pkgpath
}
-func clobberBase(n *ir.Node) *ir.Node {
+func clobberBase(n ir.Node) ir.Node {
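// A field selection on a single-field struct aliases the whole
// struct, so clobbering the base object is equivalent.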
if n.Op() == ir.ODOT && n.Left().Type().NumFields() == 1 {
return clobberBase(n.Left())
}
// It's primarily used to distinguish references to named objects,
// whose Pos will point back to their declaration position rather than
// their usage position.
-func hasUniquePos(n *ir.Node) bool {
+func hasUniquePos(n ir.Node) bool {
switch n.Op() {
case ir.ONAME, ir.OPACK:
return false
return true
}
-func setlineno(n *ir.Node) src.XPos {
+func setlineno(n ir.Node) src.XPos {
lno := base.Pos
if n != nil && hasUniquePos(n) {
base.Pos = n.Pos()
// find all the exported symbols in package opkg
// and make them available in the current package
-func importdot(opkg *types.Pkg, pack *ir.Node) {
+func importdot(opkg *types.Pkg, pack ir.Node) {
n := 0
for _, s := range opkg.Syms {
if s.Def == nil {
}
// NewName returns a new ONAME Node associated with symbol s.
-func NewName(s *types.Sym) *ir.Node {
+func NewName(s *types.Sym) ir.Node {
n := ir.NewNameAt(base.Pos, s)
n.Name().Curfn = Curfn
return n
// nodSym makes a Node with Op op and with the Left field set to left
// and the Sym field set to sym. This is for ODOT and friends.
-func nodSym(op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node {
+func nodSym(op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
return nodlSym(base.Pos, op, left, sym)
}
// nodlSym makes a Node with position Pos, with Op op, and with the Left field set to left
// and the Sym field set to sym. This is for ODOT and friends.
-func nodlSym(pos src.XPos, op ir.Op, left *ir.Node, sym *types.Sym) *ir.Node {
+func nodlSym(pos src.XPos, op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
n := ir.NodAt(pos, op, left, nil)
n.SetSym(sym)
return n
func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x methcmp) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
-func nodintconst(v int64) *ir.Node {
+func nodintconst(v int64) ir.Node {
return ir.NewLiteral(constant.MakeInt64(v))
}
-func nodnil() *ir.Node {
+func nodnil() ir.Node {
n := ir.Nod(ir.ONIL, nil, nil)
n.SetType(types.Types[types.TNIL])
return n
}
-func nodbool(b bool) *ir.Node {
+func nodbool(b bool) ir.Node {
return ir.NewLiteral(constant.MakeBool(b))
}
-func nodstr(s string) *ir.Node {
+func nodstr(s string) ir.Node {
return ir.NewLiteral(constant.MakeString(s))
}
// ONAME, OLITERAL, OTYPE, and ONONAME leaves.
// If pos.IsKnown(), it sets the source position of newly
// allocated nodes to pos.
-func treecopy(n *ir.Node, pos src.XPos) *ir.Node {
+func treecopy(n ir.Node, pos src.XPos) ir.Node {
if n == nil {
return nil
}
return ir.OXXX, ""
}
-func assignconv(n *ir.Node, t *types.Type, context string) *ir.Node {
+func assignconv(n ir.Node, t *types.Type, context string) ir.Node {
return assignconvfn(n, t, func() string { return context })
}
// Convert node n for assignment to type t.
-func assignconvfn(n *ir.Node, t *types.Type, context func() string) *ir.Node {
+func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
if n == nil || n.Type() == nil || n.Type().Broke() {
return n
}
// backingArrayPtrLen extracts the pointer and length from a slice or string.
// This constructs two nodes referring to n, so n must be a cheapexpr.
-func backingArrayPtrLen(n *ir.Node) (ptr, len *ir.Node) {
+func backingArrayPtrLen(n ir.Node) (ptr, len ir.Node) {
var init ir.Nodes
c := cheapexpr(n, &init)
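// Re-evaluating a cheapexpr must yield the identical node with
// no new side effects; anything else violates the caller's
// contract.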
if c != n || init.Len() != 0 {
// labeledControl returns the control flow Node (for, switch, select)
// associated with the label n, if any.
-func labeledControl(n *ir.Node) *ir.Node {
+func labeledControl(n ir.Node) ir.Node {
if n.Op() != ir.OLABEL {
base.Fatalf("labeledControl %v", n.Op())
}
return nil
}
-func syslook(name string) *ir.Node {
+func syslook(name string) ir.Node {
s := Runtimepkg.Lookup(name)
if s == nil || s.Def == nil {
base.Fatalf("syslook: can't find runtime.%s", name)
// updateHasCall checks whether expression n contains any function
// calls and sets the n.HasCall flag if so.
-func updateHasCall(n *ir.Node) {
+func updateHasCall(n ir.Node) {
if n == nil {
return
}
n.SetHasCall(calcHasCall(n))
}
-func calcHasCall(n *ir.Node) bool {
+func calcHasCall(n ir.Node) bool {
if n.Init().Len() != 0 {
// TODO(mdempsky): This seems overly conservative.
return true
// return side effect-free n, appending side effects to init.
// result is assignable if n is.
-func safeexpr(n *ir.Node, init *ir.Nodes) *ir.Node {
+func safeexpr(n ir.Node, init *ir.Nodes) ir.Node {
if n == nil {
return nil
}
return cheapexpr(n, init)
}
-func copyexpr(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node {
+func copyexpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
l := temp(t)
a := ir.Nod(ir.OAS, l, n)
a = typecheck(a, ctxStmt)
// return side-effect free and cheap n, appending side effects to init.
// result may not be assignable.
-func cheapexpr(n *ir.Node, init *ir.Nodes) *ir.Node {
+func cheapexpr(n ir.Node, init *ir.Nodes) ir.Node {
switch n.Op() {
case ir.ONAME, ir.OLITERAL, ir.ONIL:
return n
// find missing fields that
// will give shortest unique addressing.
// modify the tree with missing type names.
-func adddot(n *ir.Node) *ir.Node {
+func adddot(n ir.Node) ir.Node {
n.SetLeft(typecheck(n.Left(), ctxType|ctxExpr))
if n.Left().Diag() {
n.SetDiag(true)
}
// Given funarg struct list, return list of ODCLFIELD Node fn args.
-func structargs(tl *types.Type, mustname bool) []*ir.Node {
- var args []*ir.Node
+func structargs(tl *types.Type, mustname bool) []ir.Node {
+ var args []ir.Node
gen := 0
for _, t := range tl.Fields().Slice() {
s := t.Sym
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil {
inlcalls(fn)
}
- escapeFuncs([]*ir.Node{fn}, false)
+ escapeFuncs([]ir.Node{fn}, false)
Curfn = nil
xtop = append(xtop, fn)
}
-func paramNnames(ft *types.Type) []*ir.Node {
- args := make([]*ir.Node, ft.NumParams())
+func paramNnames(ft *types.Type) []ir.Node {
+ args := make([]ir.Node, ft.NumParams())
for i, f := range ft.Params().FieldSlice() {
args[i] = ir.AsNode(f.Nname)
}
return args
}
-func hashmem(t *types.Type) *ir.Node {
+func hashmem(t *types.Type) ir.Node {
sym := Runtimepkg.Lookup("memhash")
n := NewName(sym)
setNodeNameFunc(n)
- n.SetType(functype(nil, []*ir.Node{
+ n.SetType(functype(nil, []ir.Node{
anonfield(types.NewPtr(t)),
anonfield(types.Types[types.TUINTPTR]),
anonfield(types.Types[types.TUINTPTR]),
- }, []*ir.Node{
+ }, []ir.Node{
anonfield(types.Types[types.TUINTPTR]),
}))
return n
return true
}
-func listtreecopy(l []*ir.Node, pos src.XPos) []*ir.Node {
- var out []*ir.Node
+func listtreecopy(l []ir.Node, pos src.XPos) []ir.Node {
+ var out []ir.Node
for _, n := range l {
out = append(out, treecopy(n, pos))
}
return out
}
-func liststmt(l []*ir.Node) *ir.Node {
+func liststmt(l []ir.Node) ir.Node {
n := ir.Nod(ir.OBLOCK, nil, nil)
n.PtrList().Set(l)
if len(l) != 0 {
return n
}
-func ngotype(n *ir.Node) *types.Sym {
+func ngotype(n ir.Node) *types.Sym {
if n.Type() != nil {
return typenamesym(n.Type())
}
// The result of addinit MUST be assigned back to n, e.g.
// n.Left = addinit(n.Left, init)
-func addinit(n *ir.Node, init []*ir.Node) *ir.Node {
+func addinit(n ir.Node, init []ir.Node) ir.Node {
if len(init) == 0 {
return n
}
}
// itabType loads the _type field from a runtime.itab struct.
-func itabType(itab *ir.Node) *ir.Node {
+func itabType(itab ir.Node) ir.Node {
typ := nodSym(ir.ODOTPTR, itab, nil)
typ.SetType(types.NewPtr(types.Types[types.TUINT8]))
typ.SetTypecheck(1)
// ifaceData loads the data field from an interface.
// The concrete type must be known to have type t.
// It follows the pointer if !isdirectiface(t).
-func ifaceData(pos src.XPos, n *ir.Node, t *types.Type) *ir.Node {
+func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
if t.IsInterface() {
base.Fatalf("ifaceData interface: %v", t)
}
)
// typecheckswitch typechecks a switch statement.
-func typecheckswitch(n *ir.Node) {
+func typecheckswitch(n ir.Node) {
typecheckslice(n.Init().Slice(), ctxStmt)
if n.Left() != nil && n.Left().Op() == ir.OTYPESW {
typecheckTypeSwitch(n)
}
}
-func typecheckTypeSwitch(n *ir.Node) {
+func typecheckTypeSwitch(n ir.Node) {
n.Left().SetRight(typecheck(n.Left().Right(), ctxExpr))
t := n.Left().Right().Type()
if t != nil && !t.IsInterface() {
base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym())
}
- var defCase, nilCase *ir.Node
+ var defCase, nilCase ir.Node
var ts typeSet
for _, ncase := range n.List().Slice() {
ls := ncase.List().Slice()
s.m[ls] = append(prevs, typeSetEntry{pos, typ})
}
-func typecheckExprSwitch(n *ir.Node) {
+func typecheckExprSwitch(n ir.Node) {
t := types.Types[types.TBOOL]
if n.Left() != nil {
n.SetLeft(typecheck(n.Left(), ctxExpr))
}
}
- var defCase *ir.Node
+ var defCase ir.Node
var cs constSet
for _, ncase := range n.List().Slice() {
ls := ncase.List().Slice()
}
// walkswitch walks a switch statement.
-func walkswitch(sw *ir.Node) {
+func walkswitch(sw ir.Node) {
// Guard against double walk, see #25776.
if sw.List().Len() == 0 && sw.Body().Len() > 0 {
return // Was fatal, but eliminating every possible source of double-walking is hard
// walkExprSwitch generates an AST implementing sw. sw is an
// expression switch.
-func walkExprSwitch(sw *ir.Node) {
+func walkExprSwitch(sw ir.Node) {
lno := setlineno(sw)
cond := sw.Left()
exprname: cond,
}
- var defaultGoto *ir.Node
+ var defaultGoto ir.Node
var body ir.Nodes
for _, ncase := range sw.List().Slice() {
label := autolabel(".s")
// An exprSwitch walks an expression switch.
type exprSwitch struct {
- exprname *ir.Node // value being switched on
+ exprname ir.Node // value being switched on
done ir.Nodes
clauses []exprClause
type exprClause struct {
pos src.XPos
- lo, hi *ir.Node
- jmp *ir.Node
+ lo, hi ir.Node
+ jmp ir.Node
}
-func (s *exprSwitch) Add(pos src.XPos, expr, jmp *ir.Node) {
+func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
if okforcmp[s.exprname.Type().Etype] && expr.Op() == ir.OLITERAL {
s.clauses = append(s.clauses, c)
// Perform two-level binary search.
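// The outer level selects the run of cases whose strings share
// a length; the inner level compares values within that run.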
binarySearch(len(runs), &s.done,
- func(i int) *ir.Node {
+ func(i int) ir.Node {
return ir.Nod(ir.OLE, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1])))
},
- func(i int, nif *ir.Node) {
+ func(i int, nif ir.Node) {
run := runs[i]
nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run))))
s.search(run, nif.PtrBody())
func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
binarySearch(len(cc), out,
- func(i int) *ir.Node {
+ func(i int) ir.Node {
return ir.Nod(ir.OLE, s.exprname, cc[i-1].hi)
},
- func(i int, nif *ir.Node) {
+ func(i int, nif ir.Node) {
c := &cc[i]
nif.SetLeft(c.test(s.exprname))
nif.PtrBody().Set1(c.jmp)
)
}
-func (c *exprClause) test(exprname *ir.Node) *ir.Node {
+func (c *exprClause) test(exprname ir.Node) ir.Node {
// Integer range.
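// A merged run of consecutive values lo..hi becomes the pair
// of comparisons lo <= exprname && exprname <= hi.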
if c.hi != c.lo {
low := ir.NodAt(c.pos, ir.OGE, exprname, c.lo)
return ir.NodAt(c.pos, ir.OEQ, exprname, c.lo)
}
-func allCaseExprsAreSideEffectFree(sw *ir.Node) bool {
+func allCaseExprsAreSideEffectFree(sw ir.Node) bool {
// In theory, we could be more aggressive, allowing any
// side-effect-free expressions in cases, but it's a bit
// tricky because some of that information is unavailable due
}
// hasFall reports whether stmts ends with a "fallthrough" statement.
-func hasFall(stmts []*ir.Node) (bool, src.XPos) {
+func hasFall(stmts []ir.Node) (bool, src.XPos) {
// Search backwards for the index of the fallthrough
// statement. Do not assume it'll be in the last
// position, since in some cases (e.g. when the statement
// walkTypeSwitch generates an AST that implements sw, where sw is a
// type switch.
-func walkTypeSwitch(sw *ir.Node) {
+func walkTypeSwitch(sw ir.Node) {
var s typeSwitch
s.facename = sw.Left().Right()
sw.SetLeft(nil)
s.hashname = copyexpr(dotHash, dotHash.Type(), sw.PtrBody())
br := ir.Nod(ir.OBREAK, nil, nil)
- var defaultGoto, nilGoto *ir.Node
+ var defaultGoto, nilGoto ir.Node
var body ir.Nodes
for _, ncase := range sw.List().Slice() {
- var caseVar *ir.Node
+ var caseVar ir.Node
if ncase.Rlist().Len() != 0 {
caseVar = ncase.Rlist().First()
}
}
val = ifaceData(ncase.Pos(), s.facename, singleType)
}
- l := []*ir.Node{
+ l := []ir.Node{
ir.NodAt(ncase.Pos(), ir.ODCL, caseVar, nil),
ir.NodAt(ncase.Pos(), ir.OAS, caseVar, val),
}
// A typeSwitch walks a type switch.
type typeSwitch struct {
// Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
- facename *ir.Node // value being type-switched on
- hashname *ir.Node // type hash of the value being type-switched on
- okname *ir.Node // boolean used for comma-ok type assertions
+ facename ir.Node // value being type-switched on
+ hashname ir.Node // type hash of the value being type-switched on
+ okname ir.Node // boolean used for comma-ok type assertions
done ir.Nodes
clauses []typeClause
body ir.Nodes
}
-func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp *ir.Node) {
+func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) {
var body ir.Nodes
if caseVar != nil {
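// Declare the case variable and zero-initialize it; the
// type-assertion dispatch assigns the concrete value when this
// case is selected.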
- l := []*ir.Node{
+ l := []ir.Node{
ir.NodAt(pos, ir.ODCL, caseVar, nil),
ir.NodAt(pos, ir.OAS, caseVar, nil),
}
cc = merged
binarySearch(len(cc), &s.done,
- func(i int) *ir.Node {
+ func(i int) ir.Node {
return ir.Nod(ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
},
- func(i int, nif *ir.Node) {
+ func(i int, nif ir.Node) {
// TODO(mdempsky): Omit hash equality check if
// there's only one type.
c := cc[i]
//
// leaf(i, nif) should set up nif (an OIF node) to test case i. In
// particular, it should set nif.Left and nif.Nbody.
-func binarySearch(n int, out *ir.Nodes, less func(i int) *ir.Node, leaf func(i int, nif *ir.Node)) {
+func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif ir.Node)) {
const binarySearchMin = 4 // minimum number of cases for binary search
var do func(lo, hi int, out *ir.Nodes)
var traceIndent []byte
var skipDowidthForTracing bool
-func tracePrint(title string, n *ir.Node) func(np **ir.Node) {
+func tracePrint(title string, n ir.Node) func(np *ir.Node) {
indent := traceIndent
// guard against nil
fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc)
traceIndent = append(traceIndent, ". "...)
- return func(np **ir.Node) {
+ return func(np *ir.Node) {
traceIndent = traceIndent[:len(traceIndent)-2]
// if we have a result, use that
// marks variables that escape the local frame.
// rewrites n.Op to be more specific in some cases.
-var typecheckdefstack []*ir.Node
+var typecheckdefstack []ir.Node
// resolve ONONAME to definition, if any.
-func resolve(n *ir.Node) (res *ir.Node) {
+func resolve(n ir.Node) (res ir.Node) {
if n == nil || n.Op() != ir.ONONAME {
return n
}
return r
}
-func typecheckslice(l []*ir.Node, top int) {
+func typecheckslice(l []ir.Node, top int) {
for i := range l {
l[i] = typecheck(l[i], top)
}
return fmt.Sprintf("etype=%d", et)
}
-func cycleFor(start *ir.Node) []*ir.Node {
+func cycleFor(start ir.Node) []ir.Node {
// Find the start node in typecheck_tcstack.
// We know that it must exist because each time we mark
// a node with n.SetTypecheck(2) we push it on the stack,
}
// collect all nodes with same Op
- var cycle []*ir.Node
+ var cycle []ir.Node
for _, n := range typecheck_tcstack[i:] {
if n.Op() == start.Op() {
cycle = append(cycle, n)
return cycle
}
-func cycleTrace(cycle []*ir.Node) string {
+func cycleTrace(cycle []ir.Node) string {
var s string
for i, n := range cycle {
s += fmt.Sprintf("\n\t%v: %v uses %v", ir.Line(n), n, cycle[(i+1)%len(cycle)])
return s
}
-var typecheck_tcstack []*ir.Node
+var typecheck_tcstack []ir.Node
// typecheck type checks node n.
// The result of typecheck MUST be assigned back to n, e.g.
// n.Left = typecheck(n.Left, top)
-func typecheck(n *ir.Node, top int) (res *ir.Node) {
+func typecheck(n ir.Node, top int) (res ir.Node) {
// cannot type check until all the source has been parsed
if !typecheckok {
base.Fatalf("early typecheck")
// value of type int (see also checkmake for comparison).
// The result of indexlit MUST be assigned back to n, e.g.
// n.Left = indexlit(n.Left)
-func indexlit(n *ir.Node) *ir.Node {
+func indexlit(n ir.Node) ir.Node {
if n != nil && n.Type() != nil && n.Type().Etype == types.TIDEAL {
return defaultlit(n, types.Types[types.TINT])
}
// The result of typecheck1 MUST be assigned back to n, e.g.
// n.Left = typecheck1(n.Left, top)
-func typecheck1(n *ir.Node, top int) (res *ir.Node) {
+func typecheck1(n ir.Node, top int) (res ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheck1", n)(&res)
}
ir.OOROR,
ir.OSUB,
ir.OXOR:
- var l *ir.Node
+ var l ir.Node
var op ir.Op
- var r *ir.Node
+ var r ir.Node
if n.Op() == ir.OASOP {
ok |= ctxStmt
n.SetLeft(typecheck(n.Left(), ctxExpr))
l = args[i]
i++
l = typecheck(l, ctxExpr)
- var r *ir.Node
+ var r ir.Node
if i < len(args) {
r = args[i]
i++
return n
}
-func typecheckargs(n *ir.Node) {
+func typecheckargs(n ir.Node) {
if n.List().Len() != 1 || n.IsDDD() {
typecheckslice(n.List().Slice(), ctxExpr)
return
n.PtrInit().Append(as)
}
-func checksliceindex(l *ir.Node, r *ir.Node, tp *types.Type) bool {
+func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool {
t := r.Type()
if t == nil {
return false
return true
}
-func checksliceconst(lo *ir.Node, hi *ir.Node) bool {
+func checksliceconst(lo ir.Node, hi ir.Node) bool {
if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) {
base.Errorf("invalid slice index: %v > %v", lo, hi)
return false
return true
}
-func checkdefergo(n *ir.Node) {
+func checkdefergo(n ir.Node) {
what := "defer"
if n.Op() == ir.OGO {
what = "go"
// The result of implicitstar MUST be assigned back to n, e.g.
// n.Left = implicitstar(n.Left)
-func implicitstar(n *ir.Node) *ir.Node {
+func implicitstar(n ir.Node) ir.Node {
// insert implicit * if needed for fixed array
t := n.Type()
if t == nil || !t.IsPtr() {
return n
}
-func onearg(n *ir.Node, f string, args ...interface{}) bool {
+func onearg(n ir.Node, f string, args ...interface{}) bool {
if n.Left() != nil {
return true
}
return true
}
-func twoarg(n *ir.Node) bool {
+func twoarg(n ir.Node) bool {
if n.Left() != nil {
return true
}
return true
}
-func lookdot1(errnode *ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
+func lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
var r *types.Field
for _, f := range fs.Slice() {
if dostrcmp != 0 && f.Sym.Name == s.Name {
// typecheckMethodExpr checks selector expressions (ODOT) where the
// base expression is a type expression (OTYPE).
-func typecheckMethodExpr(n *ir.Node) (res *ir.Node) {
+func typecheckMethodExpr(n ir.Node) (res ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckMethodExpr", n)(&res)
}
return t
}
-func lookdot(n *ir.Node, t *types.Type, dostrcmp int) *types.Field {
+func lookdot(n ir.Node, t *types.Type, dostrcmp int) *types.Field {
s := n.Sym()
dowidth(t)
}
// typecheck assignment: type list = expression list
-func typecheckaste(op ir.Op, call *ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) {
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) {
var t *types.Type
var i int
return
}
- var n *ir.Node
+ var n ir.Node
if nl.Len() == 1 {
n = nl.First()
}
// pushtype adds elided type information for composite literals if
// appropriate, and returns the resulting expression.
-func pushtype(n *ir.Node, t *types.Type) *ir.Node {
+func pushtype(n ir.Node, t *types.Type) ir.Node {
if n == nil || n.Op() != ir.OCOMPLIT || n.Right() != nil {
return n
}
// The result of typecheckcomplit MUST be assigned back to n, e.g.
// n.Left = typecheckcomplit(n.Left)
-func typecheckcomplit(n *ir.Node) (res *ir.Node) {
+func typecheckcomplit(n ir.Node) (res ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckcomplit", n)(&res)
}
}
// typecheckarraylit type-checks a sequence of slice/array literal elements.
-func typecheckarraylit(elemType *types.Type, bound int64, elts []*ir.Node, ctx string) int64 {
+func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx string) int64 {
// If there are key/value pairs, create a map to keep seen
// keys so we can check for duplicate indices.
var indices map[int64]bool
for i, elt := range elts {
setlineno(elt)
r := elts[i]
- var kv *ir.Node
+ var kv ir.Node
if elt.Op() == ir.OKEY {
elt.SetLeft(typecheck(elt.Left(), ctxExpr))
key = indexconst(elt.Left())
}
// lvalue etc
-func islvalue(n *ir.Node) bool {
+func islvalue(n ir.Node) bool {
switch n.Op() {
case ir.OINDEX:
if n.Left().Type() != nil && n.Left().Type().IsArray() {
return false
}
-func checklvalue(n *ir.Node, verb string) {
+func checklvalue(n ir.Node, verb string) {
if !islvalue(n) {
base.Errorf("cannot %s %v", verb, n)
}
}
-func checkassign(stmt *ir.Node, n *ir.Node) {
+func checkassign(stmt ir.Node, n ir.Node) {
// Variables declared in ORANGE are assigned on every iteration.
if n.Name() == nil || n.Name().Defn != stmt || stmt.Op() == ir.ORANGE {
r := outervalue(n)
n.SetType(nil)
}
-func checkassignlist(stmt *ir.Node, l ir.Nodes) {
+func checkassignlist(stmt ir.Node, l ir.Nodes) {
for _, n := range l.Slice() {
checkassign(stmt, n)
}
// currently OK, since the only place samesafeexpr gets used on an
// lvalue expression is for OSLICE and OAPPEND optimizations, and it
// is correct in those settings.
-func samesafeexpr(l *ir.Node, r *ir.Node) bool {
+func samesafeexpr(l ir.Node, r ir.Node) bool {
if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) {
return false
}
// type check assignment.
// if this assignment is the definition of a var on the left side,
// fill in the var's type.
-func typecheckas(n *ir.Node) {
+func typecheckas(n ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckas", n)(nil)
}
}
}
-func checkassignto(src *types.Type, dst *ir.Node) {
+func checkassignto(src *types.Type, dst ir.Node) {
if op, why := assignop(src, dst.Type()); op == ir.OXXX {
base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why)
return
}
}
-func typecheckas2(n *ir.Node) {
+func typecheckas2(n ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckas2", n)(nil)
}
}
checkassignlist(n, n.List())
- var l *ir.Node
- var r *ir.Node
+ var l ir.Node
+ var r ir.Node
if cl == cr {
// easy
ls := n.List().Slice()
}
// type check function definition
-func typecheckfunc(n *ir.Node) {
+func typecheckfunc(n ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckfunc", n)(nil)
}
// The result of stringtoruneslit MUST be assigned back to n, e.g.
// n.Left = stringtoruneslit(n.Left)
-func stringtoruneslit(n *ir.Node) *ir.Node {
+func stringtoruneslit(n ir.Node) ir.Node {
if n.Left().Op() != ir.OLITERAL || n.Left().Val().Kind() != constant.String {
base.Fatalf("stringtoarraylit %v", n)
}
- var l []*ir.Node
+ var l []ir.Node
i := 0
for _, r := range n.Left().StringVal() {
l = append(l, ir.Nod(ir.OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
return nn
}
-var mapqueue []*ir.Node
+var mapqueue []ir.Node
func checkMapKeys() {
for _, n := range mapqueue {
}
}
-func typecheckdeftype(n *ir.Node) {
+func typecheckdeftype(n ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckdeftype", n)(nil)
}
}
}
-func typecheckdef(n *ir.Node) {
+func typecheckdef(n ir.Node) {
if enableTrace && base.Flag.LowerT {
defer tracePrint("typecheckdef", n)(nil)
}
n.SetWalkdef(1)
}
-func checkmake(t *types.Type, arg string, np **ir.Node) bool {
+func checkmake(t *types.Type, arg string, np *ir.Node) bool {
n := *np
if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL {
base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type())
return true
}
-func markbreak(n *ir.Node, implicit *ir.Node) {
+func markbreak(n ir.Node, implicit ir.Node) {
if n == nil {
return
}
}
}
-func markbreaklist(l ir.Nodes, implicit *ir.Node) {
+func markbreaklist(l ir.Nodes, implicit ir.Node) {
s := l.Slice()
for i := 0; i < len(s); i++ {
n := s[i]
// isTermNode reports whether the node n, the last one in a
// statement list, is a terminating statement.
-func isTermNode(n *ir.Node) bool {
+func isTermNode(n ir.Node) bool {
switch n.Op() {
// NOTE: OLABEL is treated as a separate statement,
// not a separate prefix, so skipping to the last statement
}
// checkreturn makes sure that fn terminates appropriately.
-func checkreturn(fn *ir.Node) {
+func checkreturn(fn ir.Node) {
if fn.Type().NumResults() != 0 && fn.Body().Len() != 0 {
markbreaklist(fn.Body(), nil)
if !isTermNodes(fn.Body()) {
}
}
-func deadcode(fn *ir.Node) {
+func deadcode(fn ir.Node) {
deadcodeslice(fn.PtrBody())
deadcodefn(fn)
}
-func deadcodefn(fn *ir.Node) {
+func deadcodefn(fn ir.Node) {
if fn.Body().Len() == 0 {
return
}
}
}
- fn.PtrBody().Set([]*ir.Node{ir.Nod(ir.OEMPTY, nil, nil)})
+ fn.PtrBody().Set([]ir.Node{ir.Nod(ir.OEMPTY, nil, nil)})
}
func deadcodeslice(nn *ir.Nodes) {
}
}
-func deadcodeexpr(n *ir.Node) *ir.Node {
+func deadcodeexpr(n ir.Node) ir.Node {
// Perform dead-code elimination on short-circuited boolean
// expressions involving constants with the intent of
// producing a constant 'if' condition.
}
// setTypeNode sets n to an OTYPE node representing t.
-func setTypeNode(n *ir.Node, t *types.Type) {
+func setTypeNode(n ir.Node, t *types.Type) {
n.SetOp(ir.OTYPE)
n.SetType(t)
n.Type().Nod = n
// methodExprName returns the ONAME representing the method
// referenced by expression n, which must be a method selector,
// method expression, or method value.
-func methodExprName(n *ir.Node) *ir.Node {
+func methodExprName(n ir.Node) ir.Node {
return ir.AsNode(methodExprFunc(n).Nname)
}
// methodExprFunc is like methodExprName, but returns the types.Field instead.
-func methodExprFunc(n *ir.Node) *types.Field {
+func methodExprFunc(n ir.Node) *types.Field {
switch n.Op() {
case ir.ODOTMETH, ir.OMETHEXPR:
return n.Opt().(*types.Field)
)
// evalunsafe evaluates a package unsafe operation and returns the result.
-func evalunsafe(n *ir.Node) int64 {
+func evalunsafe(n ir.Node) int64 {
switch n.Op() {
case ir.OALIGNOF, ir.OSIZEOF:
n.SetLeft(typecheck(n.Left(), ctxExpr))
const tmpstringbufsize = 32
const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
-func walk(fn *ir.Node) {
+func walk(fn ir.Node) {
Curfn = fn
errorsBefore := base.Errors()
}
}
-func walkstmtlist(s []*ir.Node) {
+func walkstmtlist(s []ir.Node) {
for i := range s {
s[i] = walkstmt(s[i])
}
}
-func paramoutheap(fn *ir.Node) bool {
+func paramoutheap(fn ir.Node) bool {
for _, ln := range fn.Func().Dcl {
switch ln.Class() {
case ir.PPARAMOUT:
// The result of walkstmt MUST be assigned back to n, e.g.
// n.Left = walkstmt(n.Left)
-func walkstmt(n *ir.Node) *ir.Node {
+func walkstmt(n ir.Node) ir.Node {
if n == nil {
return n
}
if (Curfn.Type().FuncType().Outnamed && n.List().Len() > 1) || paramoutheap(Curfn) {
// assign to the function out parameters,
// so that reorder3 can fix up conflicts
- var rl []*ir.Node
+ var rl []ir.Node
for _, ln := range Curfn.Func().Dcl {
cl := ln.Class()
// For each return parameter (lhs), assign the corresponding result (rhs).
lhs := Curfn.Type().Results()
rhs := n.List().Slice()
- res := make([]*ir.Node, lhs.NumFields())
+ res := make([]ir.Node, lhs.NumFields())
for i, nl := range lhs.FieldSlice() {
nname := ir.AsNode(nl.Nname)
if isParamHeapCopy(nname) {
// the types expressions are calculated.
// compile-time constants are evaluated.
// complex side effects like statements are appended to init
-func walkexprlist(s []*ir.Node, init *ir.Nodes) {
+func walkexprlist(s []ir.Node, init *ir.Nodes) {
for i := range s {
s[i] = walkexpr(s[i], init)
}
}
-func walkexprlistsafe(s []*ir.Node, init *ir.Nodes) {
+func walkexprlistsafe(s []ir.Node, init *ir.Nodes) {
for i, n := range s {
s[i] = safeexpr(n, init)
s[i] = walkexpr(s[i], init)
}
}
-func walkexprlistcheap(s []*ir.Node, init *ir.Nodes) {
+func walkexprlistcheap(s []ir.Node, init *ir.Nodes) {
for i, n := range s {
s[i] = cheapexpr(n, init)
s[i] = walkexpr(s[i], init)
// The result of walkexpr MUST be assigned back to n, e.g.
// n.Left = walkexpr(n.Left, init)
-func walkexpr(n *ir.Node, init *ir.Nodes) *ir.Node {
+func walkexpr(n ir.Node, init *ir.Nodes) ir.Node {
if n == nil {
return n
}
r := n.Right()
walkexprlistsafe(n.List().Slice(), init)
r.SetLeft(walkexpr(r.Left(), init))
- var n1 *ir.Node
+ var n1 ir.Node
if ir.IsBlank(n.List().First()) {
n1 = nodnil()
} else {
t := r.Left().Type()
fast := mapfast(t)
- var key *ir.Node
+ var key ir.Node
if fast != mapslow {
// fast versions take key by value
key = r.Right()
}
// typeword generates the type word of the interface value.
- typeword := func() *ir.Node {
+ typeword := func() ir.Node {
if toType.IsEmptyInterface() {
return typename(fromType)
}
// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
// by using an existing addressable value identical to n.Left
// or creating one on the stack.
- var value *ir.Node
+ var value ir.Node
switch {
case fromType.Size() == 0:
// n.Left is zero-sized. Use zerobase.
break
}
- var tab *ir.Node
+ var tab ir.Node
if fromType.IsInterface() {
// convI2I
tab = typename(toType)
hint := n.Left()
// var h *hmap
- var h *ir.Node
+ var h ir.Node
if n.Esc() == EscNone {
// Allocate hmap on stack.
// Allocate a [n]byte of the right size.
t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
- var a *ir.Node
+ var a ir.Node
if n.Esc() == EscNone && len(sc) <= int(maxImplicitStackVarSize) {
a = ir.Nod(ir.OADDR, temp(t), nil)
} else {
// markUsedIfaceMethod marks that an interface method is used in the current
// function. n is an OCALLINTER node.
-func markUsedIfaceMethod(n *ir.Node) {
+func markUsedIfaceMethod(n ir.Node) {
ityp := n.Left().Left().Type()
tsym := typenamesym(ityp).Linksym()
r := obj.Addrel(Curfn.Func().LSym)
}
// TODO(josharian): combine this with its caller and simplify
-func reduceSlice(n *ir.Node) *ir.Node {
+func reduceSlice(n ir.Node) ir.Node {
low, high, max := n.SliceBounds()
if high != nil && high.Op() == ir.OLEN && samesafeexpr(n.Left(), high.Left()) {
// Reduce x[i:len(x)] to x[i:].
return n
}
-func ascompatee1(l *ir.Node, r *ir.Node, init *ir.Nodes) *ir.Node {
+func ascompatee1(l ir.Node, r ir.Node, init *ir.Nodes) ir.Node {
// convas will turn map assigns into function calls,
// making it impossible for reorder3 to work.
n := ir.Nod(ir.OAS, l, r)
return convas(n, init)
}
-func ascompatee(op ir.Op, nl, nr []*ir.Node, init *ir.Nodes) []*ir.Node {
+func ascompatee(op ir.Op, nl, nr []ir.Node, init *ir.Nodes) []ir.Node {
// check assign expression list to
// an expression list. called in
// expr-list = expr-list
nr[i1] = safeexpr(nr[i1], init)
}
- var nn []*ir.Node
+ var nn []ir.Node
i := 0
for ; i < len(nl); i++ {
if i >= len(nr) {
}
// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
-func fncall(l *ir.Node, rt *types.Type) bool {
+func fncall(l ir.Node, rt *types.Type) bool {
if l.HasCall() || l.Op() == ir.OINDEXMAP {
return true
}
// check assign type list to
// an expression list. called in
// expr-list = func()
-func ascompatet(nl ir.Nodes, nr *types.Type) []*ir.Node {
+func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
if nl.Len() != nr.NumFields() {
base.Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
}
}
// package all the arguments that match a ... T parameter into a []T.
-func mkdotargslice(typ *types.Type, args []*ir.Node) *ir.Node {
- var n *ir.Node
+func mkdotargslice(typ *types.Type, args []ir.Node) ir.Node {
+ var n ir.Node
if len(args) == 0 {
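// No arguments remain for the variadic parameter: pass a nil
// slice of the parameter's slice type.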
n = nodnil()
n.SetType(typ)
// fixVariadicCall rewrites calls to variadic functions to use an
// explicit ... argument if one is not already present.
-func fixVariadicCall(call *ir.Node) {
+func fixVariadicCall(call ir.Node) {
fntype := call.Left().Type()
if !fntype.IsVariadic() || call.IsDDD() {
return
call.SetIsDDD(true)
}
-func walkCall(n *ir.Node, init *ir.Nodes) {
+func walkCall(n ir.Node, init *ir.Nodes) {
if n.Rlist().Len() != 0 {
return // already walked
}
// If this is a method call, add the receiver at the beginning of the args.
if n.Op() == ir.OCALLMETH {
- withRecv := make([]*ir.Node, len(args)+1)
+ withRecv := make([]ir.Node, len(args)+1)
withRecv[0] = n.Left().Left()
n.Left().SetLeft(nil)
copy(withRecv[1:], args)
// store that argument into a temporary variable,
// to prevent that call from clobbering arguments already on the stack.
// When instrumenting, all arguments might require function calls.
- var tempAssigns []*ir.Node
+ var tempAssigns []ir.Node
for i, arg := range args {
updateHasCall(arg)
// Determine param type.
}
// generate code for print
-func walkprint(nn *ir.Node, init *ir.Nodes) *ir.Node {
+func walkprint(nn ir.Node, init *ir.Nodes) ir.Node {
// Hoist all the argument evaluation up before the lock.
walkexprlistcheap(nn.List().Slice(), init)
// For println, add " " between elements and "\n" at the end.
if nn.Op() == ir.OPRINTN {
s := nn.List().Slice()
- t := make([]*ir.Node, 0, len(s)*2)
+ t := make([]ir.Node, 0, len(s)*2)
for i, n := range s {
if i != 0 {
t = append(t, nodstr(" "))
// Collapse runs of constant strings.
s := nn.List().Slice()
- t := make([]*ir.Node, 0, len(s))
+ t := make([]ir.Node, 0, len(s))
for i := 0; i < len(s); {
var strs []string
for i < len(s) && ir.IsConst(s[i], constant.String) {
}
nn.PtrList().Set(t)
- calls := []*ir.Node{mkcall("printlock", nil, init)}
+ calls := []ir.Node{mkcall("printlock", nil, init)}
for i, n := range nn.List().Slice() {
if n.Op() == ir.OLITERAL {
if n.Type() == types.UntypedRune {
continue
}
- var on *ir.Node
+ var on ir.Node
switch n.Type().Etype {
case types.TINTER:
if n.Type().IsEmptyInterface() {
return r
}
-func callnew(t *types.Type) *ir.Node {
+func callnew(t *types.Type) ir.Node {
dowidth(t)
n := ir.Nod(ir.ONEWOBJ, typename(t), nil)
n.SetType(types.NewPtr(t))
// isReflectHeaderDataField reports whether l is an expression p.Data
// where p has type reflect.SliceHeader or reflect.StringHeader.
-func isReflectHeaderDataField(l *ir.Node) bool {
+func isReflectHeaderDataField(l ir.Node) bool {
if l.Type() != types.Types[types.TUINTPTR] {
return false
}
return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
}
-func convas(n *ir.Node, init *ir.Nodes) *ir.Node {
+func convas(n ir.Node, init *ir.Nodes) ir.Node {
if n.Op() != ir.OAS {
base.Fatalf("convas: not OAS %v", n.Op())
}
// be later use of an earlier lvalue.
//
// function calls have been removed.
-func reorder3(all []*ir.Node) []*ir.Node {
+func reorder3(all []ir.Node) []ir.Node {
// If a needed expression may be affected by an
// earlier assignment, make an early copy of that
// expression and use the copy instead.
- var early []*ir.Node
+ var early []ir.Node
var mapinit ir.Nodes
for i, n := range all {
// replace *np with that temp.
// The result of reorder3save MUST be assigned back to n, e.g.
// n.Left = reorder3save(n.Left, all, i, early)
-func reorder3save(n *ir.Node, all []*ir.Node, i int, early *[]*ir.Node) *ir.Node {
+func reorder3save(n ir.Node, all []ir.Node, i int, early *[]ir.Node) ir.Node {
if !aliased(n, all[:i]) {
return n
}
// what's the outer value that a write to n affects?
// outer value means containing struct or array.
-func outervalue(n *ir.Node) *ir.Node {
+func outervalue(n ir.Node) ir.Node {
for {
switch n.Op() {
case ir.OXDOT:
// Is it possible that the computation of r might be
// affected by assignments in all?
-func aliased(r *ir.Node, all []*ir.Node) bool {
+func aliased(r ir.Node, all []ir.Node) bool {
if r == nil {
return false
}
// does the evaluation of n only refer to variables
// whose addresses have not been taken?
// (and no other memory)
-func varexpr(n *ir.Node) bool {
+func varexpr(n ir.Node) bool {
if n == nil {
return true
}
}
// is the name l mentioned in r?
-func vmatch2(l *ir.Node, r *ir.Node) bool {
+func vmatch2(l ir.Node, r ir.Node) bool {
if r == nil {
return false
}
// is any name mentioned in l also mentioned in r?
// called by sinit.go
-func vmatch1(l *ir.Node, r *ir.Node) bool {
+func vmatch1(l ir.Node, r ir.Node) bool {
// isolate all left sides
if l == nil || r == nil {
return false
// paramstoheap returns code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
-func paramstoheap(params *types.Type) []*ir.Node {
- var nn []*ir.Node
+func paramstoheap(params *types.Type) []ir.Node {
+ var nn []ir.Node
for _, t := range params.Fields().Slice() {
v := ir.AsNode(t.Nname)
if v != nil && v.Sym() != nil && strings.HasPrefix(v.Sym().Name, "~r") { // unnamed result
// returnsfromheap returns code to copy values for heap-escaped parameters
// back to the stack.
-func returnsfromheap(params *types.Type) []*ir.Node {
- var nn []*ir.Node
+func returnsfromheap(params *types.Type) []ir.Node {
+ var nn []ir.Node
for _, t := range params.Fields().Slice() {
v := ir.AsNode(t.Nname)
if v == nil {
base.Pos = lno
}
-func vmkcall(fn *ir.Node, t *types.Type, init *ir.Nodes, va []*ir.Node) *ir.Node {
+func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) ir.Node {
if fn.Type() == nil || fn.Type().Etype != types.TFUNC {
base.Fatalf("mkcall %v %v", fn, fn.Type())
}
return r
}
-func mkcall(name string, t *types.Type, init *ir.Nodes, args ...*ir.Node) *ir.Node {
+func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node {
return vmkcall(syslook(name), t, init, args)
}
-func mkcall1(fn *ir.Node, t *types.Type, init *ir.Nodes, args ...*ir.Node) *ir.Node {
+func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) ir.Node {
return vmkcall(fn, t, init, args)
}
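One wrinkle with variadic helpers like mkcall: a []*node would not
convert implicitly to ...Node, which is why every slice declaration has
to become []ir.Node as well, not just the signatures. A sketch under
the same stand-in types as above:

    package main

    import "fmt"

    type Node interface{ String() string }

    type node struct{ name string }

    func (n *node) String() string { return n.name }

    // mkcall-style helper: variadic interface parameters, as in
    // mkcall(name string, ..., args ...ir.Node).
    func mkcall(name string, args ...Node) string {
        s := name + "("
        for i, a := range args {
            if i > 0 {
                s += ", "
            }
            s += a.String()
        }
        return s + ")"
    }

    func main() {
        // A []*node cannot be passed to ...Node directly; each
        // element must be converted individually.
        ptrs := []*node{{"x"}, {"y"}}
        nodes := make([]Node, len(ptrs))
        for i, p := range ptrs {
            nodes[i] = p
        }
        fmt.Println(mkcall("f", nodes...))
    }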
-func conv(n *ir.Node, t *types.Type) *ir.Node {
+func conv(n ir.Node, t *types.Type) ir.Node {
if types.Identical(n.Type(), t) {
return n
}
// convnop converts node n to type t using the OCONVNOP op
// and typechecks the result with ctxExpr.
-func convnop(n *ir.Node, t *types.Type) *ir.Node {
+func convnop(n ir.Node, t *types.Type) ir.Node {
if types.Identical(n.Type(), t) {
return n
}
// byteindex converts n, which is byte-sized, to an int used to index into an array.
// We cannot use conv, because we allow converting bool to int here,
// which is forbidden in user code.
-func byteindex(n *ir.Node) *ir.Node {
+func byteindex(n ir.Node) ir.Node {
// We cannot convert from bool to int directly.
// While converting from int8 to int is possible, it would yield
// the wrong result for negative values.
return n
}
-func chanfn(name string, n int, t *types.Type) *ir.Node {
+func chanfn(name string, n int, t *types.Type) ir.Node {
if !t.IsChan() {
base.Fatalf("chanfn %v", t)
}
return fn
}
-func mapfn(name string, t *types.Type) *ir.Node {
+func mapfn(name string, t *types.Type) ir.Node {
if !t.IsMap() {
base.Fatalf("mapfn %v", t)
}
return fn
}
-func mapfndel(name string, t *types.Type) *ir.Node {
+func mapfndel(name string, t *types.Type) ir.Node {
if !t.IsMap() {
base.Fatalf("mapfn %v", t)
}
return mapslow
}
-func writebarrierfn(name string, l *types.Type, r *types.Type) *ir.Node {
+func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
fn := syslook(name)
fn = substArgTypes(fn, l, r)
return fn
}
-func addstr(n *ir.Node, init *ir.Nodes) *ir.Node {
+func addstr(n ir.Node, init *ir.Nodes) ir.Node {
// order.expr rewrote OADDSTR to have a list of strings.
c := n.List().Len()
}
// build list of string arguments
- args := []*ir.Node{buf}
+ args := []ir.Node{buf}
for _, n2 := range n.List().Slice() {
args = append(args, conv(n2, types.Types[types.TSTRING]))
}
prealloc[slice] = prealloc[n]
}
slice.PtrList().Set(args[1:]) // skip buf arg
- args = []*ir.Node{buf, slice}
+ args = []ir.Node{buf, slice}
slice.SetEsc(EscNone)
}
return r
}
-func walkAppendArgs(n *ir.Node, init *ir.Nodes) {
+func walkAppendArgs(n ir.Node, init *ir.Nodes) {
walkexprlistsafe(n.List().Slice(), init)
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// s
//
// l2 is allowed to be a string.
-func appendslice(n *ir.Node, init *ir.Nodes) *ir.Node {
+func appendslice(n ir.Node, init *ir.Nodes) ir.Node {
walkAppendArgs(n, init)
l1 := n.List().First()
nt.SetBounded(true)
nodes.Append(ir.Nod(ir.OAS, s, nt))
- var ncopy *ir.Node
+ var ncopy ir.Node
if elemtype.HasPointers() {
// copy(s[len(l1):], l2)
nptr1 := ir.Nod(ir.OSLICE, s, nil)
// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...).
// isAppendOfMake assumes n has already been typechecked.
-func isAppendOfMake(n *ir.Node) bool {
+func isAppendOfMake(n ir.Node) bool {
if base.Flag.N != 0 || instrumenting {
return false
}
// }
// }
// s
-func extendslice(n *ir.Node, init *ir.Nodes) *ir.Node {
+func extendslice(n ir.Node, init *ir.Nodes) ir.Node {
// isAppendOfMake made sure all possible positive values of l2 fit into an uint.
// The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
// check of l2 < 0 at runtime which is generated below.
l1 := n.List().First()
l2 = n.List().Second() // re-read l2, as it may have been updated by walkAppendArgs
- var nodes []*ir.Node
+ var nodes []ir.Node
// if l2 >= 0 (likely happens), do nothing
nifneg := ir.Nod(ir.OIF, ir.Nod(ir.OGE, l2, nodintconst(0)), nil)
// ...
// }
// s
-func walkappend(n *ir.Node, init *ir.Nodes, dst *ir.Node) *ir.Node {
+func walkappend(n ir.Node, init *ir.Nodes, dst ir.Node) ir.Node {
if !samesafeexpr(dst, n.List().First()) {
n.List().SetFirst(safeexpr(n.List().First(), init))
n.List().SetFirst(walkexpr(n.List().First(), init))
return n
}
- var l []*ir.Node
+ var l []ir.Node
ns := temp(nsrc.Type())
l = append(l, ir.Nod(ir.OAS, ns, nsrc)) // s = src
//
// Also works if b is a string.
//
-func copyany(n *ir.Node, init *ir.Nodes, runtimecall bool) *ir.Node {
+func copyany(n ir.Node, init *ir.Nodes, runtimecall bool) ir.Node {
if n.Left().Type().Elem().HasPointers() {
Curfn.Func().SetWBPos(n.Pos())
fn := writebarrierfn("typedslicecopy", n.Left().Type().Elem(), n.Right().Type().Elem())
n.SetRight(walkexpr(n.Right(), init))
nl := temp(n.Left().Type())
nr := temp(n.Right().Type())
- var l []*ir.Node
+ var l []ir.Node
l = append(l, ir.Nod(ir.OAS, nl, n.Left()))
l = append(l, ir.Nod(ir.OAS, nr, n.Right()))
return nlen
}
-func eqfor(t *types.Type) (n *ir.Node, needsize bool) {
+func eqfor(t *types.Type) (n ir.Node, needsize bool) {
// Should only arrive here with large memory or
// a struct/array containing a non-memory field/element.
// Small memory is handled inline, and single non-memory
sym := typesymprefix(".eq", t)
n := NewName(sym)
setNodeNameFunc(n)
- n.SetType(functype(nil, []*ir.Node{
+ n.SetType(functype(nil, []ir.Node{
anonfield(types.NewPtr(t)),
anonfield(types.NewPtr(t)),
- }, []*ir.Node{
+ }, []ir.Node{
anonfield(types.Types[types.TBOOL]),
}))
return n, false
// The result of walkcompare MUST be assigned back to n, e.g.
// n.Left = walkcompare(n.Left, init)
-func walkcompare(n *ir.Node, init *ir.Nodes) *ir.Node {
+func walkcompare(n ir.Node, init *ir.Nodes) ir.Node {
if n.Left().Type().IsInterface() && n.Right().Type().IsInterface() && n.Left().Op() != ir.ONIL && n.Right().Op() != ir.ONIL {
return walkcompareInterface(n, init)
}
// l.tab == type(r)
// For non-empty interface, this is:
// l.tab != nil && l.tab._type == type(r)
- var eqtype *ir.Node
+ var eqtype ir.Node
tab := ir.Nod(ir.OITAB, l, nil)
rtyp := typename(r.Type())
if l.Type().IsEmptyInterface() {
if n.Op() == ir.ONE {
andor = ir.OOROR
}
- var expr *ir.Node
- compare := func(el, er *ir.Node) {
+ var expr ir.Node
+ compare := func(el, er ir.Node) {
a := ir.Nod(n.Op(), el, er)
if expr == nil {
expr = a
return n
}
-func tracecmpArg(n *ir.Node, t *types.Type, init *ir.Nodes) *ir.Node {
+func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
// Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
if n.Op() == ir.OLITERAL && n.Type().IsSigned() && n.Int64Val() < 0 {
n = copyexpr(n, n.Type(), init)
return conv(n, t)
}
-func walkcompareInterface(n *ir.Node, init *ir.Nodes) *ir.Node {
+func walkcompareInterface(n ir.Node, init *ir.Nodes) ir.Node {
n.SetRight(cheapexpr(n.Right(), init))
n.SetLeft(cheapexpr(n.Left(), init))
eqtab, eqdata := eqinterface(n.Left(), n.Right())
- var cmp *ir.Node
+ var cmp ir.Node
if n.Op() == ir.OEQ {
cmp = ir.Nod(ir.OANDAND, eqtab, eqdata)
} else {
return finishcompare(n, cmp, init)
}
-func walkcompareString(n *ir.Node, init *ir.Nodes) *ir.Node {
+func walkcompareString(n ir.Node, init *ir.Nodes) ir.Node {
// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
- var cs, ncs *ir.Node // const string, non-const string
+ var cs, ncs ir.Node // const string, non-const string
switch {
case ir.IsConst(n.Left(), constant.String) && ir.IsConst(n.Right(), constant.String):
// ignore; will be constant evaluated
}
}
- var r *ir.Node
+ var r ir.Node
if n.Op() == ir.OEQ || n.Op() == ir.ONE {
// prepare for rewrite below
n.SetLeft(cheapexpr(n.Left(), init))
// The result of finishcompare MUST be assigned back to n, e.g.
// n.Left = finishcompare(n.Left, x, r, init)
-func finishcompare(n, r *ir.Node, init *ir.Nodes) *ir.Node {
+func finishcompare(n, r ir.Node, init *ir.Nodes) ir.Node {
r = typecheck(r, ctxExpr)
r = conv(r, n.Type())
r = walkexpr(r, init)
}
// bounded reports whether integer n must be in range [0, max).
-func bounded(n *ir.Node, max int64) bool {
+func bounded(n ir.Node, max int64) bool {
if n.Type() == nil || !n.Type().IsInteger() {
return false
}
}
// usemethod checks interface method calls for uses of reflect.Type.Method.
-func usemethod(n *ir.Node) {
+func usemethod(n ir.Node) {
t := n.Left().Type()
// Looking for either of:
}
}
-func usefield(n *ir.Node) {
+func usefield(n ir.Node) {
if objabi.Fieldtrack_enabled == 0 {
return
}
return true
}
-func candiscard(n *ir.Node) bool {
+func candiscard(n ir.Node) bool {
if n == nil {
return true
}
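Helpers like candiscard here lean on n == nil checks, which is exactly
where the pointer-to-interface hazard described above would bite: a nil
*node stored in an ir.Node is not a nil interface. A standalone
illustration (stand-in types):

    package main

    import "fmt"

    type Node interface{ Op() string }

    type node struct{ op string }

    func (n *node) Op() string { return n.op }

    func main() {
        // A typed nil wrapped in an interface compares unequal to nil:
        // the interface has a non-nil type word and a nil value word.
        var p *node
        var n Node = p
        fmt.Println(p == nil) // true
        fmt.Println(n == nil) // false
    }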
// The result of wrapCall MUST be assigned back to n, e.g.
// n.Left = wrapCall(n.Left, init)
-func wrapCall(n *ir.Node, init *ir.Nodes) *ir.Node {
+func wrapCall(n ir.Node, init *ir.Nodes) ir.Node {
if n.Init().Len() != 0 {
walkstmtlist(n.Init().Slice())
init.AppendNodes(n.PtrInit())
}
// origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
- origArgs := make([]*ir.Node, n.List().Len())
+ origArgs := make([]ir.Node, n.List().Len())
t := ir.Nod(ir.OTFUNC, nil, nil)
for i, arg := range n.List().Slice() {
s := lookupN("a", i)
// type syntax expression n.Type.
// The result of substArgTypes MUST be assigned back to old, e.g.
// n.Left = substArgTypes(n.Left, t1, t2)
-func substArgTypes(old *ir.Node, types_ ...*types.Type) *ir.Node {
+func substArgTypes(old ir.Node, types_ ...*types.Type) ir.Node {
n := ir.Copy(old)
for _, t := range types_ {
// isRuneCount reports whether n is of the form len([]rune(string)).
// These are optimized into a call to runtime.countrunes.
-func isRuneCount(n *ir.Node) bool {
+func isRuneCount(n ir.Node) bool {
return base.Flag.N == 0 && !instrumenting && n.Op() == ir.OLEN && n.Left().Op() == ir.OSTR2RUNES
}
-func walkCheckPtrAlignment(n *ir.Node, init *ir.Nodes, count *ir.Node) *ir.Node {
+func walkCheckPtrAlignment(n ir.Node, init *ir.Nodes, count ir.Node) ir.Node {
if !n.Type().IsPtr() {
base.Fatalf("expected pointer type: %v", n.Type())
}
var walkCheckPtrArithmeticMarker byte
-func walkCheckPtrArithmetic(n *ir.Node, init *ir.Nodes) *ir.Node {
+func walkCheckPtrArithmetic(n ir.Node, init *ir.Nodes) ir.Node {
// Calling cheapexpr(n, init) below leads to a recursive call
// to walkexpr, which leads us back here again. Use n.Opt to
// prevent infinite loops.
// "It is valid both to add and to subtract offsets from a
// pointer in this way. It is also valid to use &^ to round
// pointers, usually for alignment."
- var originals []*ir.Node
- var walk func(n *ir.Node)
- walk = func(n *ir.Node) {
+ var originals []ir.Node
+ var walk func(n ir.Node)
+ walk = func(n ir.Node) {
switch n.Op() {
case ir.OADD:
walk(n.Left())
// checkPtr reports whether pointer checking should be enabled for
// function fn at a given level. See debugHelpFooter for defined
// levels.
-func checkPtr(fn *ir.Node, level int) bool {
+func checkPtr(fn ir.Node, level int) bool {
return base.Debug.Checkptr >= level && fn.Func().Pragma&ir.NoCheckPtr == 0
}
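walkCheckPtrArithmetic's recursive closure above carries over with only
the parameter type changed. A self-contained version of that shape
(node here is a stand-in with explicit Left/Right accessors):

    package main

    import "fmt"

    type Node interface {
        Op() string
        Left() Node
        Right() Node
    }

    type node struct {
        op          string
        left, right Node
    }

    func (n *node) Op() string  { return n.op }
    func (n *node) Left() Node  { return n.left }
    func (n *node) Right() Node { return n.right }

    func main() {
        // Recursive closure over the interface, collecting leaves.
        var originals []Node
        var walk func(n Node)
        walk = func(n Node) {
            if n == nil {
                return
            }
            switch n.Op() {
            case "OADD":
                walk(n.Left())
                walk(n.Right())
            default:
                originals = append(originals, n)
            }
        }
        walk(&node{op: "OADD", left: &node{op: "ONAME"}, right: &node{op: "OLIT"}})
        fmt.Println(len(originals)) // 2
    }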
typ := x.Type()
isNode := false
- if n, ok := x.Interface().(Node); ok {
+ if n, ok := x.Interface().(node); ok {
isNode = true
p.printf("%s %s {", n.op.String(), p.addr(x))
} else {
type FmtMode int
type fmtNode struct {
- x *Node
+ x Node
m FmtMode
}
func (f *fmtNodes) Format(s fmt.State, verb rune) { f.x.format(s, verb, f.m) }
-func (n *Node) Format(s fmt.State, verb rune) {
+func (n *node) Format(s fmt.State, verb rune) {
FmtNode(n, s, verb)
}
-func FmtNode(n *Node, s fmt.State, verb rune) {
+func FmtNode(n Node, s fmt.State, verb rune) {
nodeFormat(n, s, verb, FErr)
}
switch arg := arg.(type) {
case Op:
args[i] = &fmtOp{arg, m}
- case *Node:
+ case Node:
args[i] = &fmtNode{arg, m}
case nil:
args[i] = &fmtNode{nil, m} // assume this was a node interface
}
}
-func nodeFormat(n *Node, s fmt.State, verb rune, mode FmtMode) {
+func nodeFormat(n Node, s fmt.State, verb rune, mode FmtMode) {
switch verb {
case 'v', 'S', 'L':
nconvFmt(n, s, fmtFlag(s, verb), mode)
}
// EscFmt is set by the escape analysis code to add escape analysis details to the node print.
-var EscFmt func(n *Node, short bool) string
+var EscFmt func(n Node, short bool) string
// Node details
-func jconvFmt(n *Node, s fmt.State, flag FmtFlag) {
+func jconvFmt(n Node, s fmt.State, flag FmtFlag) {
short := flag&FmtShort != 0
// Useful to see which nodes in an AST printout are actually identical
return false
}
-func stmtFmt(n *Node, s fmt.State, mode FmtMode) {
+func stmtFmt(n Node, s fmt.State, mode FmtMode) {
// some statements allow for an init, but at most one,
// but we may have an arbitrary number added, eg by typecheck
// and inlining. If it doesn't fit the syntax, emit an enclosing
OEND: 0,
}
-func exprFmt(n *Node, s fmt.State, prec int, mode FmtMode) {
+func exprFmt(n Node, s fmt.State, prec int, mode FmtMode) {
for n != nil && n.Implicit() && (n.Op() == ODEREF || n.Op() == OADDR) {
n = n.Left()
}
}
}
-func nodeFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) {
+func nodeFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) {
t := n.Type()
// We almost always want the original.
exprFmt(n, s, 0, mode)
}
-func nodeDumpFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) {
+func nodeDumpFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) {
recur := flag&FmtShort == 0
if recur {
}
}
-func (n *Node) String() string { return fmt.Sprint(n) }
-func modeString(n *Node, mode FmtMode) string { return mode.Sprint(n) }
+func (n *node) String() string { return fmt.Sprint(n) }
+func modeString(n Node, mode FmtMode) string { return mode.Sprint(n) }
// "%L" suffix with "(type %T)" where possible
// "%+S" in debug mode, don't recurse, no multiline output
-func nconvFmt(n *Node, s fmt.State, flag FmtFlag, mode FmtMode) {
+func nconvFmt(n Node, s fmt.State, flag FmtFlag, mode FmtMode) {
if n == nil {
fmt.Fprint(s, "<N>")
return
fmt.Fprintf(w, "%s%+v\n", s, l)
}
-func Dump(s string, n *Node) {
+func Dump(s string, n Node) {
fmt.Printf("%s [%p]%+v\n", s, n, n)
}
// Line returns n's position as a string. If n has been inlined,
// it uses the outermost position where n has been inlined.
-func Line(n *Node) string {
+func Line(n Node) string {
return base.FmtPos(n.Pos())
}
)
// A Node is the abstract interface to an IR node.
-type INode interface {
+type Node interface {
// Formatting
Format(s fmt.State, verb rune)
String() string
SetPos(x src.XPos)
// For making copies. Mainly used by Copy and SepCopy.
- RawCopy() *Node
+ RawCopy() Node
// Abstract graph structure, for generic traversals.
Op() Op
SetOp(x Op)
- Orig() *Node
- SetOrig(x *Node)
+ Orig() Node
+ SetOrig(x Node)
SubOp() Op
SetSubOp(x Op)
- Left() *Node
- SetLeft(x *Node)
- Right() *Node
- SetRight(x *Node)
+ Left() Node
+ SetLeft(x Node)
+ Right() Node
+ SetRight(x Node)
Init() Nodes
PtrInit() *Nodes
SetInit(x Nodes)
SetClass(x Class)
Likely() bool
SetLikely(x bool)
- SliceBounds() (low, high, max *Node)
- SetSliceBounds(low, high, max *Node)
+ SliceBounds() (low, high, max Node)
+ SetSliceBounds(low, high, max Node)
Iota() int64
SetIota(x int64)
Colas() bool
CanBeAnSSASym()
}
-var _ INode = (*Node)(nil)
+var _ Node = (*node)(nil)
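The var _ Node = (*node)(nil) line is the standard compile-time guard
that the unexported struct keeps satisfying the exported interface. In
isolation:

    package main

    type Node interface{ Op() int }

    type node struct{ op int }

    func (n *node) Op() int { return n.op }

    // This blank assignment fails to compile if *node ever stops
    // implementing Node, catching drift between interface and struct
    // at build time rather than at some distant call site.
    var _ Node = (*node)(nil)

    func main() {}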
// A node is a single node in the syntax tree.
// Actually the syntax tree is a syntax DAG, because there is only one
// node with Op=ONAME for a given instance of a variable x.
// The same is true for Op=OTYPE and Op=OLITERAL. See MayBeShared.
-type Node struct {
+type node struct {
// Tree structure.
// Generic recursive walks should follow these fields.
- left *Node
- right *Node
+ left Node
+ right Node
init Nodes
body Nodes
list Nodes
// most nodes
typ *types.Type
- orig *Node // original form, for printing, and tracking copies of ONAMEs
+ orig Node // original form, for printing, and tracking copies of ONAMEs
// func
fn *Func
aux uint8
}
-func (n *Node) Left() *Node { return n.left }
-func (n *Node) SetLeft(x *Node) { n.left = x }
-func (n *Node) Right() *Node { return n.right }
-func (n *Node) SetRight(x *Node) { n.right = x }
-func (n *Node) Orig() *Node { return n.orig }
-func (n *Node) SetOrig(x *Node) { n.orig = x }
-func (n *Node) Type() *types.Type { return n.typ }
-func (n *Node) SetType(x *types.Type) { n.typ = x }
-func (n *Node) Func() *Func { return n.fn }
-func (n *Node) SetFunc(x *Func) { n.fn = x }
-func (n *Node) Name() *Name { return n.name }
-func (n *Node) SetName(x *Name) { n.name = x }
-func (n *Node) Sym() *types.Sym { return n.sym }
-func (n *Node) SetSym(x *types.Sym) { n.sym = x }
-func (n *Node) Pos() src.XPos { return n.pos }
-func (n *Node) SetPos(x src.XPos) { n.pos = x }
-func (n *Node) Offset() int64 { return n.offset }
-func (n *Node) SetOffset(x int64) { n.offset = x }
-func (n *Node) Esc() uint16 { return n.esc }
-func (n *Node) SetEsc(x uint16) { n.esc = x }
-func (n *Node) Op() Op { return n.op }
-func (n *Node) SetOp(x Op) { n.op = x }
-func (n *Node) Init() Nodes { return n.init }
-func (n *Node) SetInit(x Nodes) { n.init = x }
-func (n *Node) PtrInit() *Nodes { return &n.init }
-func (n *Node) Body() Nodes { return n.body }
-func (n *Node) SetBody(x Nodes) { n.body = x }
-func (n *Node) PtrBody() *Nodes { return &n.body }
-func (n *Node) List() Nodes { return n.list }
-func (n *Node) SetList(x Nodes) { n.list = x }
-func (n *Node) PtrList() *Nodes { return &n.list }
-func (n *Node) Rlist() Nodes { return n.rlist }
-func (n *Node) SetRlist(x Nodes) { n.rlist = x }
-func (n *Node) PtrRlist() *Nodes { return &n.rlist }
-
-func (n *Node) ResetAux() {
+func (n *node) Left() Node { return n.left }
+func (n *node) SetLeft(x Node) { n.left = x }
+func (n *node) Right() Node { return n.right }
+func (n *node) SetRight(x Node) { n.right = x }
+func (n *node) Orig() Node { return n.orig }
+func (n *node) SetOrig(x Node) { n.orig = x }
+func (n *node) Type() *types.Type { return n.typ }
+func (n *node) SetType(x *types.Type) { n.typ = x }
+func (n *node) Func() *Func { return n.fn }
+func (n *node) SetFunc(x *Func) { n.fn = x }
+func (n *node) Name() *Name { return n.name }
+func (n *node) SetName(x *Name) { n.name = x }
+func (n *node) Sym() *types.Sym { return n.sym }
+func (n *node) SetSym(x *types.Sym) { n.sym = x }
+func (n *node) Pos() src.XPos { return n.pos }
+func (n *node) SetPos(x src.XPos) { n.pos = x }
+func (n *node) Offset() int64 { return n.offset }
+func (n *node) SetOffset(x int64) { n.offset = x }
+func (n *node) Esc() uint16 { return n.esc }
+func (n *node) SetEsc(x uint16) { n.esc = x }
+func (n *node) Op() Op { return n.op }
+func (n *node) SetOp(x Op) { n.op = x }
+func (n *node) Init() Nodes { return n.init }
+func (n *node) SetInit(x Nodes) { n.init = x }
+func (n *node) PtrInit() *Nodes { return &n.init }
+func (n *node) Body() Nodes { return n.body }
+func (n *node) SetBody(x Nodes) { n.body = x }
+func (n *node) PtrBody() *Nodes { return &n.body }
+func (n *node) List() Nodes { return n.list }
+func (n *node) SetList(x Nodes) { n.list = x }
+func (n *node) PtrList() *Nodes { return &n.list }
+func (n *node) Rlist() Nodes { return n.rlist }
+func (n *node) SetRlist(x Nodes) { n.rlist = x }
+func (n *node) PtrRlist() *Nodes { return &n.rlist }
+
+func (n *node) ResetAux() {
n.aux = 0
}
-func (n *Node) SubOp() Op {
+func (n *node) SubOp() Op {
switch n.Op() {
case OASOP, ONAME:
default:
return Op(n.aux)
}
-func (n *Node) SetSubOp(op Op) {
+func (n *node) SetSubOp(op Op) {
switch n.Op() {
case OASOP, ONAME:
default:
n.aux = uint8(op)
}
-func (n *Node) IndexMapLValue() bool {
+func (n *node) IndexMapLValue() bool {
if n.Op() != OINDEXMAP {
base.Fatalf("unexpected op: %v", n.Op())
}
return n.aux != 0
}
-func (n *Node) SetIndexMapLValue(b bool) {
+func (n *node) SetIndexMapLValue(b bool) {
if n.Op() != OINDEXMAP {
base.Fatalf("unexpected op: %v", n.Op())
}
}
}
-func (n *Node) TChanDir() types.ChanDir {
+func (n *node) TChanDir() types.ChanDir {
if n.Op() != OTCHAN {
base.Fatalf("unexpected op: %v", n.Op())
}
return types.ChanDir(n.aux)
}
-func (n *Node) SetTChanDir(dir types.ChanDir) {
+func (n *node) SetTChanDir(dir types.ChanDir) {
if n.Op() != OTCHAN {
base.Fatalf("unexpected op: %v", n.Op())
}
n.aux = uint8(dir)
}
-func IsSynthetic(n *Node) bool {
+func IsSynthetic(n Node) bool {
name := n.Sym().Name
return name[0] == '.' || name[0] == '~'
}
// IsAutoTmp indicates if n was created by the compiler as a temporary,
// based on the setting of the .AutoTemp flag in n's Name.
-func IsAutoTmp(n *Node) bool {
+func IsAutoTmp(n Node) bool {
if n == nil || n.Op() != ONAME {
return false
}
_, nodeEmbedded // ODCLFIELD embedded type
)
-func (n *Node) Class() Class { return Class(n.flags.get3(nodeClass)) }
-func (n *Node) Walkdef() uint8 { return n.flags.get2(nodeWalkdef) }
-func (n *Node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) }
-func (n *Node) Initorder() uint8 { return n.flags.get2(nodeInitorder) }
-
-func (n *Node) HasBreak() bool { return n.flags&nodeHasBreak != 0 }
-func (n *Node) NoInline() bool { return n.flags&nodeNoInline != 0 }
-func (n *Node) Implicit() bool { return n.flags&nodeImplicit != 0 }
-func (n *Node) IsDDD() bool { return n.flags&nodeIsDDD != 0 }
-func (n *Node) Diag() bool { return n.flags&nodeDiag != 0 }
-func (n *Node) Colas() bool { return n.flags&nodeColas != 0 }
-func (n *Node) NonNil() bool { return n.flags&nodeNonNil != 0 }
-func (n *Node) Transient() bool { return n.flags&nodeTransient != 0 }
-func (n *Node) Bounded() bool { return n.flags&nodeBounded != 0 }
-func (n *Node) HasCall() bool { return n.flags&nodeHasCall != 0 }
-func (n *Node) Likely() bool { return n.flags&nodeLikely != 0 }
-func (n *Node) HasVal() bool { return n.flags&nodeHasVal != 0 }
-func (n *Node) HasOpt() bool { return n.flags&nodeHasOpt != 0 }
-func (n *Node) Embedded() bool { return n.flags&nodeEmbedded != 0 }
-
-func (n *Node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) }
-func (n *Node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) }
-func (n *Node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) }
-func (n *Node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) }
-
-func (n *Node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) }
-func (n *Node) SetNoInline(b bool) { n.flags.set(nodeNoInline, b) }
-func (n *Node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) }
-func (n *Node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) }
-func (n *Node) SetDiag(b bool) { n.flags.set(nodeDiag, b) }
-func (n *Node) SetColas(b bool) { n.flags.set(nodeColas, b) }
-func (n *Node) SetTransient(b bool) { n.flags.set(nodeTransient, b) }
-func (n *Node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) }
-func (n *Node) SetLikely(b bool) { n.flags.set(nodeLikely, b) }
-func (n *Node) setHasVal(b bool) { n.flags.set(nodeHasVal, b) }
-func (n *Node) setHasOpt(b bool) { n.flags.set(nodeHasOpt, b) }
-func (n *Node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) }
+func (n *node) Class() Class { return Class(n.flags.get3(nodeClass)) }
+func (n *node) Walkdef() uint8 { return n.flags.get2(nodeWalkdef) }
+func (n *node) Typecheck() uint8 { return n.flags.get2(nodeTypecheck) }
+func (n *node) Initorder() uint8 { return n.flags.get2(nodeInitorder) }
+
+func (n *node) HasBreak() bool { return n.flags&nodeHasBreak != 0 }
+func (n *node) NoInline() bool { return n.flags&nodeNoInline != 0 }
+func (n *node) Implicit() bool { return n.flags&nodeImplicit != 0 }
+func (n *node) IsDDD() bool { return n.flags&nodeIsDDD != 0 }
+func (n *node) Diag() bool { return n.flags&nodeDiag != 0 }
+func (n *node) Colas() bool { return n.flags&nodeColas != 0 }
+func (n *node) NonNil() bool { return n.flags&nodeNonNil != 0 }
+func (n *node) Transient() bool { return n.flags&nodeTransient != 0 }
+func (n *node) Bounded() bool { return n.flags&nodeBounded != 0 }
+func (n *node) HasCall() bool { return n.flags&nodeHasCall != 0 }
+func (n *node) Likely() bool { return n.flags&nodeLikely != 0 }
+func (n *node) HasVal() bool { return n.flags&nodeHasVal != 0 }
+func (n *node) HasOpt() bool { return n.flags&nodeHasOpt != 0 }
+func (n *node) Embedded() bool { return n.flags&nodeEmbedded != 0 }
+
+func (n *node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) }
+func (n *node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) }
+func (n *node) SetTypecheck(b uint8) { n.flags.set2(nodeTypecheck, b) }
+func (n *node) SetInitorder(b uint8) { n.flags.set2(nodeInitorder, b) }
+
+func (n *node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) }
+func (n *node) SetNoInline(b bool) { n.flags.set(nodeNoInline, b) }
+func (n *node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) }
+func (n *node) SetIsDDD(b bool) { n.flags.set(nodeIsDDD, b) }
+func (n *node) SetDiag(b bool) { n.flags.set(nodeDiag, b) }
+func (n *node) SetColas(b bool) { n.flags.set(nodeColas, b) }
+func (n *node) SetTransient(b bool) { n.flags.set(nodeTransient, b) }
+func (n *node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) }
+func (n *node) SetLikely(b bool) { n.flags.set(nodeLikely, b) }
+func (n *node) setHasVal(b bool) { n.flags.set(nodeHasVal, b) }
+func (n *node) setHasOpt(b bool) { n.flags.set(nodeHasOpt, b) }
+func (n *node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) }
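These accessors are one-liners over a packed flag word, letting the
interface expose bool getters and setters while the struct stores a
couple of bytes. A self-contained sketch of the pattern (bitset16 here
is a simplified local type, not the compiler's helper):

    package main

    import "fmt"

    type bitset16 uint16

    func (f *bitset16) set(mask bitset16, b bool) {
        if b {
            *f |= mask
        } else {
            *f &^= mask
        }
    }

    const (
        nodeHasBreak bitset16 = 1 << iota
        nodeImplicit
    )

    type node struct{ flags bitset16 }

    // One-line accessors in the style of the CL: read and write
    // individual bits of the packed flag word.
    func (n *node) HasBreak() bool     { return n.flags&nodeHasBreak != 0 }
    func (n *node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) }

    func main() {
        var n node
        n.SetHasBreak(true)
        fmt.Println(n.HasBreak()) // true
    }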
// MarkNonNil marks a pointer n as being guaranteed non-nil,
// on all code paths, at all times.
// During conversion to SSA, non-nil pointers won't have nil checks
// inserted before dereferencing. See state.exprPtr.
-func (n *Node) MarkNonNil() {
+func (n *node) MarkNonNil() {
if !n.Type().IsPtr() && !n.Type().IsUnsafePtr() {
base.Fatalf("MarkNonNil(%v), type %v", n, n.Type())
}
// When n is an index or slice operation, n does not need bounds checks.
// When n is a dereferencing operation, n does not need nil checks.
// When n is a makeslice+copy operation, n does not need length and cap checks.
-func (n *Node) SetBounded(b bool) {
+func (n *node) SetBounded(b bool) {
switch n.Op() {
case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
// No bounds checks needed.
}
// MarkReadonly indicates that n is an ONAME with readonly contents.
-func (n *Node) MarkReadonly() {
+func (n *node) MarkReadonly() {
if n.Op() != ONAME {
base.Fatalf("Node.MarkReadonly %v", n.Op())
}
}
// Val returns the constant.Value for the node.
-func (n *Node) Val() constant.Value {
+func (n *node) Val() constant.Value {
if !n.HasVal() {
return constant.MakeUnknown()
}
// SetVal sets the constant.Value for the node,
// which must not have been used with SetOpt.
-func (n *Node) SetVal(v constant.Value) {
+func (n *node) SetVal(v constant.Value) {
if n.HasOpt() {
base.Flag.LowerH = 1
Dump("have Opt", n)
}
// Opt returns the optimizer data for the node.
-func (n *Node) Opt() interface{} {
+func (n *node) Opt() interface{} {
if !n.HasOpt() {
return nil
}
// SetOpt sets the optimizer data for the node, which must not have been used with SetVal.
// SetOpt(nil) is ignored for Vals to simplify call sites that are clearing Opts.
-func (n *Node) SetOpt(x interface{}) {
+func (n *node) SetOpt(x interface{}) {
if x == nil {
if n.HasOpt() {
n.setHasOpt(false)
n.e = x
}
-func (n *Node) Iota() int64 {
+func (n *node) Iota() int64 {
return n.Offset()
}
-func (n *Node) SetIota(x int64) {
+func (n *node) SetIota(x int64) {
n.SetOffset(x)
}
// MayBeShared reports whether n may occur in multiple places in the AST.
// Extra care must be taken when mutating such a node.
-func MayBeShared(n *Node) bool {
+func MayBeShared(n Node) bool {
switch n.Op() {
case ONAME, OLITERAL, ONIL, OTYPE:
return true
}
// FuncName returns the name (without the package) of the function n.
-func FuncName(n *Node) string {
+func FuncName(n Node) string {
if n == nil || n.Func() == nil || n.Func().Nname == nil {
return "<nil>"
}
// This differs from the compiler's internal convention where local functions lack a package
// because the ultimate consumer of this is a human looking at an IDE; package is only empty
// if the compilation package is actually the empty string.
-func PkgFuncName(n *Node) string {
+func PkgFuncName(n Node) string {
var s *types.Sym
if n == nil {
return "<nil>"
}
// The compiler needs Node to be assignable to cmd/compile/internal/ssa.Sym.
-func (n *Node) CanBeAnSSASym() {
+func (n *node) CanBeAnSSASym() {
}
// Name holds Node fields used only by named nodes (ONAME, OTYPE, OPACK, OLABEL, some OLITERAL).
type Name struct {
- Pack *Node // real package for import . names
+ Pack Node // real package for import . names
Pkg *types.Pkg // pkg for OPACK nodes
// For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
// For a closure var, the ONAME node of the outer captured variable
- Defn *Node
+ Defn Node
// The ODCLFUNC node (for a static function/method or a closure) in which
// local variable or param is declared.
- Curfn *Node
+ Curfn Node
Param *Param // additional fields for ONAME, OTYPE
Decldepth int32 // declaration loop depth, increased for every loop or label
// Unique number for ONAME nodes within a function. Function outputs
func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) }
type Param struct {
- Ntype *Node
- Heapaddr *Node // temp holding heap address of param
+ Ntype Node
+ Heapaddr Node // temp holding heap address of param
// ONAME PAUTOHEAP
- Stackcopy *Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
+ Stackcopy Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
// ONAME closure linkage
// Consider:
//
// Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
// and x.Innermost/Outer means x.Name.Param.Innermost/Outer.
- Innermost *Node
- Outer *Node
+ Innermost Node
+ Outer Node
// OTYPE & ONAME //go:embed info,
// sharing storage to reduce gc.Param size.
// the generated ODCLFUNC (as n.Func.Decl), but there is no
// pointer from the Func back to the OCALLPART.
type Func struct {
- Nname *Node // ONAME node
- Decl *Node // ODCLFUNC node
- OClosure *Node // OCLOSURE node
+ Nname Node // ONAME node
+ Decl Node // ODCLFUNC node
+ OClosure Node // OCLOSURE node
Shortname *types.Sym
Exit Nodes
// ONAME nodes for all params/locals for this func/closure, does NOT
// include closurevars until transformclosure runs.
- Dcl []*Node
+ Dcl []Node
ClosureEnter Nodes // list of ONAME nodes of captured variables
- ClosureType *Node // closure representation type
+ ClosureType Node // closure representation type
ClosureCalled bool // closure is only immediately called
ClosureVars Nodes // closure params; each has closurevar set
Cost int32 // heuristic cost of inlining this function
// Copies of Func.Dcl and Nbody for use during inlining.
- Dcl []*Node
- Body []*Node
+ Dcl []Node
+ Body []Node
}
// A Mark represents a scope boundary.
// Nodes is a pointer to a slice of Node.
// For fields that are not used in most nodes, this is used instead of
// a slice to save space.
-type Nodes struct{ slice *[]*Node }
+type Nodes struct{ slice *[]Node }
// AsNodes returns a slice of Node as a Nodes value.
-func AsNodes(s []*Node) Nodes {
+func AsNodes(s []Node) Nodes {
return Nodes{&s}
}
// Slice returns the entries in Nodes as a slice.
// Changes to the slice entries (as in s[i] = n) will be reflected in
// the Nodes.
-func (n Nodes) Slice() []*Node {
+func (n Nodes) Slice() []Node {
if n.slice == nil {
return nil
}
// Index returns the i'th element of Nodes.
// It panics if n does not have at least i+1 elements.
-func (n Nodes) Index(i int) *Node {
+func (n Nodes) Index(i int) Node {
return (*n.slice)[i]
}
// First returns the first element of Nodes (same as n.Index(0)).
// It panics if n has no elements.
-func (n Nodes) First() *Node {
+func (n Nodes) First() Node {
return (*n.slice)[0]
}
// Second returns the second element of Nodes (same as n.Index(1)).
// It panics if n has fewer than two elements.
-func (n Nodes) Second() *Node {
+func (n Nodes) Second() Node {
return (*n.slice)[1]
}
// Set sets n to a slice.
// This takes ownership of the slice.
-func (n *Nodes) Set(s []*Node) {
+func (n *Nodes) Set(s []Node) {
if len(s) == 0 {
n.slice = nil
} else {
}
// Set1 sets n to a slice containing a single node.
-func (n *Nodes) Set1(n1 *Node) {
- n.slice = &[]*Node{n1}
+func (n *Nodes) Set1(n1 Node) {
+ n.slice = &[]Node{n1}
}
// Set2 sets n to a slice containing two nodes.
-func (n *Nodes) Set2(n1, n2 *Node) {
- n.slice = &[]*Node{n1, n2}
+func (n *Nodes) Set2(n1, n2 Node) {
+ n.slice = &[]Node{n1, n2}
}
// Set3 sets n to a slice containing three nodes.
-func (n *Nodes) Set3(n1, n2, n3 *Node) {
- n.slice = &[]*Node{n1, n2, n3}
+func (n *Nodes) Set3(n1, n2, n3 Node) {
+ n.slice = &[]Node{n1, n2, n3}
}
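Nodes keeps a *[]Node instead of a []Node so the zero value is a single
word; most nodes leave these fields empty, which is the space rationale
in the comment above. The representation carries over unchanged; only
the element type widens from *Node to Node. Sketch:

    package main

    import "fmt"

    type Node interface{ Op() string }

    // Nodes wraps *[]Node: nil means the empty list, so an unused
    // field costs one word rather than a three-word slice header.
    type Nodes struct{ slice *[]Node }

    func (n Nodes) Len() int {
        if n.slice == nil {
            return 0
        }
        return len(*n.slice)
    }

    // Set takes ownership of s, as in the CL.
    func (n *Nodes) Set(s []Node) {
        if len(s) == 0 {
            n.slice = nil
        } else {
            n.slice = &s
        }
    }

    type lit string

    func (l lit) Op() string { return string(l) }

    func main() {
        var ns Nodes
        ns.Set([]Node{lit("OAS"), lit("OIF")})
        fmt.Println(ns.Len()) // 2
    }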
// MoveNodes sets n to the contents of n2, then clears n2.
// SetIndex sets the i'th element of Nodes to node.
// It panics if n does not have at least i+1 elements.
-func (n Nodes) SetIndex(i int, node *Node) {
+func (n Nodes) SetIndex(i int, node Node) {
(*n.slice)[i] = node
}
// SetFirst sets the first element of Nodes to node.
// It panics if n does not have at least one element.
-func (n Nodes) SetFirst(node *Node) {
+func (n Nodes) SetFirst(node Node) {
(*n.slice)[0] = node
}
// SetSecond sets the second element of Nodes to node.
// It panics if n does not have at least two elements.
-func (n Nodes) SetSecond(node *Node) {
+func (n Nodes) SetSecond(node Node) {
(*n.slice)[1] = node
}
// Addr returns the address of the i'th element of Nodes.
// It panics if n does not have at least i+1 elements.
-func (n Nodes) Addr(i int) **Node {
+func (n Nodes) Addr(i int) *Node {
return &(*n.slice)[i]
}
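Addr is a subtle one: it used to return **Node (the address of a
pointer slot) and now returns *Node, because each slice element is
already an interface value. An illustration with stand-in types:

    package main

    import "fmt"

    type Node interface{ Op() string }

    type lit string

    func (l lit) Op() string { return string(l) }

    func main() {
        slice := []Node{lit("OAS")}
        p := &slice[0] // *Node: address of an interface-typed element
        *p = lit("OIF")
        fmt.Println(slice[0].Op()) // OIF
    }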
// Append appends entries to Nodes.
-func (n *Nodes) Append(a ...*Node) {
+func (n *Nodes) Append(a ...Node) {
if len(a) == 0 {
return
}
if n.slice == nil {
- s := make([]*Node, len(a))
+ s := make([]Node, len(a))
copy(s, a)
n.slice = &s
return
// Prepend prepends entries to Nodes.
// If a slice is passed in, this will take ownership of it.
-func (n *Nodes) Prepend(a ...*Node) {
+func (n *Nodes) Prepend(a ...Node) {
if len(a) == 0 {
return
}
// Inspect invokes f on each node in an AST in depth-first order.
// If f(n) returns false, Inspect skips visiting n's children.
-func Inspect(n *Node, f func(*Node) bool) {
+func Inspect(n Node, f func(Node) bool) {
if n == nil || !f(n) {
return
}
InspectList(n.Rlist(), f)
}
-func InspectList(l Nodes, f func(*Node) bool) {
+func InspectList(l Nodes, f func(Node) bool) {
for _, n := range l.Slice() {
Inspect(n, f)
}
// NodeQueue is a FIFO queue of Node. The zero value of NodeQueue is
// a ready-to-use empty queue.
type NodeQueue struct {
- ring []*Node
+ ring []Node
head, tail int
}
}
// PushRight appends n to the right of the queue.
-func (q *NodeQueue) PushRight(n *Node) {
+func (q *NodeQueue) PushRight(n Node) {
if len(q.ring) == 0 {
- q.ring = make([]*Node, 16)
+ q.ring = make([]Node, 16)
} else if q.head+len(q.ring) == q.tail {
// Grow the ring.
- nring := make([]*Node, len(q.ring)*2)
+ nring := make([]Node, len(q.ring)*2)
// Copy the old elements.
part := q.ring[q.head%len(q.ring):]
if q.tail-q.head <= len(part) {
// PopLeft pops a node from the left of the queue. It panics if q is
// empty.
-func (q *NodeQueue) PopLeft() *Node {
+func (q *NodeQueue) PopLeft() Node {
if q.Empty() {
panic("dequeue empty")
}
}
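NodeQueue is a growable ring buffer: head and tail count monotonically
and ring[i%len(ring)] holds element i. A simplified sketch of the same
scheme (the real PushRight copies the live window in two chunks; this
version copies element by element):

    package main

    import "fmt"

    type Node interface{ Op() string }

    type queue struct {
        ring       []Node
        head, tail int
    }

    func (q *queue) push(n Node) {
        if len(q.ring) == 0 {
            q.ring = make([]Node, 16)
        } else if q.head+len(q.ring) == q.tail {
            // Full: grow by copying live elements into a ring of
            // twice the size, preserving their logical indices.
            nring := make([]Node, len(q.ring)*2)
            for i := q.head; i < q.tail; i++ {
                nring[i%len(nring)] = q.ring[i%len(q.ring)]
            }
            q.ring = nring
        }
        q.ring[q.tail%len(q.ring)] = n
        q.tail++
    }

    func (q *queue) pop() Node {
        n := q.ring[q.head%len(q.ring)]
        q.head++
        return n
    }

    type lit string

    func (l lit) Op() string { return string(l) }

    func main() {
        var q queue
        q.push(lit("a"))
        q.push(lit("b"))
        fmt.Println(q.pop().Op(), q.pop().Op()) // a b
    }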
// NodeSet is a set of Nodes.
-type NodeSet map[*Node]struct{}
+type NodeSet map[Node]struct{}
// Has reports whether s contains n.
-func (s NodeSet) Has(n *Node) bool {
+func (s NodeSet) Has(n Node) bool {
_, isPresent := s[n]
return isPresent
}
// Add adds n to s.
-func (s *NodeSet) Add(n *Node) {
+func (s *NodeSet) Add(n Node) {
if *s == nil {
- *s = make(map[*Node]struct{})
+ *s = make(map[Node]struct{})
}
(*s)[n] = struct{}{}
}
// Sorted returns s sorted according to less.
-func (s NodeSet) Sorted(less func(*Node, *Node) bool) []*Node {
- var res []*Node
+func (s NodeSet) Sorted(less func(Node, Node) bool) []Node {
+ var res []Node
for n := range s {
res = append(res, n)
}
return res
}
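NodeSet works unchanged as a map keyed by the interface: interface
values are valid map keys as long as the dynamic type is comparable,
and *node pointers are. A sketch with the same Add/Sorted shape
(sort.Slice stands in for the caller-supplied ordering):

    package main

    import (
        "fmt"
        "sort"
    )

    type Node interface{ Op() string }

    type NodeSet map[Node]struct{}

    // Add lazily allocates the map, matching the CL's nil-map idiom.
    func (s *NodeSet) Add(n Node) {
        if *s == nil {
            *s = make(map[Node]struct{})
        }
        (*s)[n] = struct{}{}
    }

    func (s NodeSet) Sorted(less func(Node, Node) bool) []Node {
        var res []Node
        for n := range s {
            res = append(res, n)
        }
        sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) })
        return res
    }

    type lit string

    func (l lit) Op() string { return string(l) }

    func main() {
        var s NodeSet
        s.Add(lit("b"))
        s.Add(lit("a"))
        for _, n := range s.Sorted(func(a, b Node) bool { return a.Op() < b.Op() }) {
            fmt.Println(n.Op())
        }
    }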
-func Nod(op Op, nleft, nright *Node) *Node {
+func Nod(op Op, nleft, nright Node) Node {
return NodAt(base.Pos, op, nleft, nright)
}
-func NodAt(pos src.XPos, op Op, nleft, nright *Node) *Node {
- var n *Node
+func NodAt(pos src.XPos, op Op, nleft, nright Node) Node {
+ var n Node
switch op {
case ODCLFUNC:
var x struct {
- n Node
+ n node
f Func
}
n = &x.n
base.Fatalf("use newname instead")
case OLABEL, OPACK:
var x struct {
- n Node
+ n node
m Name
}
n = &x.n
n.SetName(&x.m)
default:
- n = new(Node)
+ n = new(node)
}
n.SetOp(op)
n.SetLeft(nleft)
// NewNameAt returns a new ONAME Node associated with symbol s at position pos.
// The caller is responsible for setting n.Name.Curfn.
-func NewNameAt(pos src.XPos, s *types.Sym) *Node {
+func NewNameAt(pos src.XPos, s *types.Sym) Node {
if s == nil {
base.Fatalf("newnamel nil")
}
var x struct {
- n Node
+ n node
m Name
p Param
}
Pos src.XPos // line of call
}
-func AsNode(n types.IRNode) *Node {
+func AsNode(n types.IRNode) Node {
if n == nil {
return nil
}
- return n.(*Node)
+ return n.(Node)
}
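NodAt and NewNameAt keep the old allocation trick: for ops that always
carry a Func or Name, the node and its payload are declared in one
local struct so a single heap object backs both, and the conversion to
the interface costs no extra allocation. A sketch of the pattern
(stand-in types; newFuncNode is invented):

    package main

    import "fmt"

    type Node interface{ Op() string }

    type node struct{ op string }

    type Func struct{ name string }

    func (n *node) Op() string { return n.op }

    func newFuncNode() Node {
        // One escaping struct, one heap allocation for both parts.
        var x struct {
            n node
            f Func
        }
        x.n.op = "ODCLFUNC"
        x.f.name = "f"
        return &x.n
    }

    func main() {
        fmt.Println(newFuncNode().Op())
    }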
-var BlankNode *Node
+var BlankNode Node
// OrigSym returns the original symbol written by the user.
func OrigSym(s *types.Sym) *types.Sym {
// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max].
// n must be a slice expression. max is nil if n is a simple slice expression.
-func (n *Node) SliceBounds() (low, high, max *Node) {
+func (n *node) SliceBounds() (low, high, max Node) {
if n.List().Len() == 0 {
return nil, nil, nil
}
// SetSliceBounds sets n's slice bounds, where n is a slice expression.
// n must be a slice expression. If max is non-nil, n must be a full slice expression.
-func (n *Node) SetSliceBounds(low, high, max *Node) {
+func (n *node) SetSliceBounds(low, high, max Node) {
switch n.Op() {
case OSLICE, OSLICEARR, OSLICESTR:
if max != nil {
return false
}
-func IsConst(n *Node, ct constant.Kind) bool {
+func IsConst(n Node, ct constant.Kind) bool {
return ConstType(n) == ct
}
// Int64Val returns n as an int64.
// n must be an integer or rune constant.
-func (n *Node) Int64Val() int64 {
+func (n *node) Int64Val() int64 {
if !IsConst(n, constant.Int) {
base.Fatalf("Int64Val(%v)", n)
}
}
// CanInt64 reports whether it is safe to call Int64Val() on n.
-func (n *Node) CanInt64() bool {
+func (n *node) CanInt64() bool {
if !IsConst(n, constant.Int) {
return false
}
// Uint64Val returns n as a uint64.
// n must be an integer or rune constant.
-func (n *Node) Uint64Val() uint64 {
+func (n *node) Uint64Val() uint64 {
if !IsConst(n, constant.Int) {
base.Fatalf("Uint64Val(%v)", n)
}
// BoolVal returns n as a bool.
// n must be a boolean constant.
-func (n *Node) BoolVal() bool {
+func (n *node) BoolVal() bool {
if !IsConst(n, constant.Bool) {
base.Fatalf("BoolVal(%v)", n)
}
// StringVal returns the value of a literal string Node as a string.
// n must be a string constant.
-func (n *Node) StringVal() string {
+func (n *node) StringVal() string {
if !IsConst(n, constant.String) {
base.Fatalf("StringVal(%v)", n)
}
// RawCopy returns a shallow copy of n.
// Note: Copy or SepCopy (rather than RawCopy) is usually the
// correct choice (see comment with Copy, below).
-func (n *Node) RawCopy() *Node {
+func (n *node) RawCopy() Node {
copy := *n
return &copy
}
// SepCopy returns a separate shallow copy of n, with the copy's
// Orig pointing to itself.
-func SepCopy(n *Node) *Node {
+func SepCopy(n Node) Node {
n = n.RawCopy()
n.SetOrig(n)
return n
// represent the original node anymore.
// (This caused the wrong complit Op to be used when printing error
// messages; see issues #26855, #27765).
-func Copy(n *Node) *Node {
+func Copy(n Node) Node {
copy := n.RawCopy()
if n.Orig() == n {
copy.SetOrig(copy)
}
// IsNil reports whether n represents the universal untyped zero value "nil".
-func IsNil(n *Node) bool {
+func IsNil(n Node) bool {
// Check n.Orig because constant propagation may produce typed nil constants,
// which don't exist in the Go spec.
return n.Orig().Op() == ONIL
}
-func IsBlank(n *Node) bool {
+func IsBlank(n Node) bool {
if n == nil {
return false
}
// IsMethod reports whether n is a method.
// n must be a function or a method.
-func IsMethod(n *Node) bool {
+func IsMethod(n Node) bool {
return n.Type().Recv() != nil
}
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {Func{}, 136, 248},
- {Name{}, 32, 56},
- {Param{}, 24, 48},
- {Node{}, 76, 128},
+ {Func{}, 152, 280},
+ {Name{}, 44, 80},
+ {Param{}, 44, 88},
+ {node{}, 88, 152},
}
for _, tt := range tests {
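The growth in this size table is the direct cost of the representation
change: each *Node field was one word, while a Node interface value is
two words (type pointer plus data pointer). A standalone illustration,
assuming a 64-bit platform (withPointer and withInterface are invented
examples):

    package main

    import (
        "fmt"
        "unsafe"
    )

    type Node interface{ Op() string }

    type withPointer struct {
        n   *int // one word
        off int64
    }

    type withInterface struct {
        n   Node // two words: (type, data)
        off int64
    }

    func main() {
        fmt.Println(unsafe.Sizeof(withPointer{}))   // 16 on 64-bit
        fmt.Println(unsafe.Sizeof(withInterface{})) // 24 on 64-bit
    }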
"cmd/compile/internal/types"
)
-func ConstType(n *Node) constant.Kind {
+func ConstType(n Node) constant.Kind {
if n == nil || n.Op() != OLITERAL {
return constant.Unknown
}
// ValueInterface returns the constant value stored in n as an interface{}.
// It returns int64s for ints and runes, float64s for floats,
// and complex128s for complex values.
-func ConstValue(n *Node) interface{} {
+func ConstValue(n Node) interface{} {
switch v := n.Val(); v.Kind() {
default:
base.Fatalf("unexpected constant: %v", v)
}
// NewLiteral returns a new untyped constant with value v.
-func NewLiteral(v constant.Value) *Node {
+func NewLiteral(v constant.Value) Node {
n := Nod(OLITERAL, nil, nil)
if k := v.Kind(); k != constant.Unknown {
n.SetType(idealType(k))
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *ir.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *ir.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
p.To.Reg = v.Reg()
}
- case *obj.LSym, *ir.Node:
+ case *obj.LSym, ir.Node:
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
- case *ir.Node:
+ case ir.Node:
wantreg = "SP"
gc.AddAux(&p.From, v)
case nil:
// Auto returns a Node for an auto variable of the given type.
// The SSA compiler uses this function to allocate space for spills.
- Auto(src.XPos, *types.Type) *ir.Node
+ Auto(src.XPos, *types.Type) ir.Node
// Given the name for a compound type, returns the name we should use
// for the parts of that compound type.
// reaches stores then we delete all the stores. The other operations will then
// be eliminated by the dead code elimination pass.
func elimDeadAutosGeneric(f *Func) {
- addr := make(map[*Value]*ir.Node) // values that the address of the auto reaches
- elim := make(map[*Value]*ir.Node) // values that could be eliminated if the auto is
- used := make(map[*ir.Node]bool) // used autos that must be kept
+ addr := make(map[*Value]ir.Node) // values that the address of the auto reaches
+ elim := make(map[*Value]ir.Node) // values that could be eliminated if the auto is
+ used := make(map[ir.Node]bool) // used autos that must be kept
// visit the value and report whether any of the maps are updated
visit := func(v *Value) (changed bool) {
switch v.Op {
case OpAddr, OpLocalAddr:
// Propagate the address if it points to an auto.
- n, ok := v.Aux.(*ir.Node)
+ n, ok := v.Aux.(ir.Node)
if !ok || n.Class() != ir.PAUTO {
return
}
return
case OpVarDef, OpVarKill:
// v should be eliminated if we eliminate the auto.
- n, ok := v.Aux.(*ir.Node)
+ n, ok := v.Aux.(ir.Node)
if !ok || n.Class() != ir.PAUTO {
return
}
// for open-coded defers from being removed (since they
// may not be used by the inline code, but will be used by
// panic processing).
- n, ok := v.Aux.(*ir.Node)
+ n, ok := v.Aux.(ir.Node)
if !ok || n.Class() != ir.PAUTO {
return
}
}
// Propagate any auto addresses through v.
- var node *ir.Node
+ var node ir.Node
for _, a := range args {
if n, ok := addr[a]; ok && !used[n] {
if node == nil {
// Loop over all ops that affect autos taking note of which
// autos we need and also stores that we might be able to
// eliminate.
- seen := make(map[*ir.Node]bool)
+ seen := make(map[ir.Node]bool)
var stores []*Value
for _, b := range f.Blocks {
for _, v := range b.Values {
- n, ok := v.Aux.(*ir.Node)
+ n, ok := v.Aux.(ir.Node)
if !ok {
continue
}
// Eliminate stores to unread autos.
for _, store := range stores {
- n, _ := store.Aux.(*ir.Node)
+ n, _ := store.Aux.(ir.Node)
if seen[n] {
continue
}
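In the SSA passes, v.Aux is an untyped interface{}, so the assertions
flip from the concrete *ir.Node to the ir.Node interface. An assertion
to an interface type succeeds for any dynamic type that implements it,
so the comma-ok pattern carries over unchanged. A sketch (auxNode and
the stand-in types are invented):

    package main

    import "fmt"

    type Node interface{ Class() string }

    type node struct{ class string }

    func (n *node) Class() string { return n.class }

    // auxNode mimics the n, ok := v.Aux.(ir.Node) assertions above:
    // interface-to-interface, rather than interface-to-pointer.
    func auxNode(aux interface{}) (Node, bool) {
        n, ok := aux.(Node)
        return n, ok
    }

    func main() {
        var aux interface{} = &node{class: "PAUTO"}
        if n, ok := auxNode(aux); ok {
            fmt.Println(n.Class()) // PAUTO
        }
    }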
// Slots is all the slots used in the debug info, indexed by their SlotID.
Slots []LocalSlot
// The user variables, indexed by VarID.
- Vars []*ir.Node
+ Vars []ir.Node
// The slots that make up each variable, indexed by VarID.
VarSlots [][]SlotID
// The location list data, indexed by VarID. Must be processed by PutLocationList.
type debugState struct {
// See FuncDebug.
slots []LocalSlot
- vars []*ir.Node
+ vars []ir.Node
varSlots [][]SlotID
lists [][]byte
// The pending location list entry for each user variable, indexed by VarID.
pendingEntries []pendingEntry
- varParts map[*ir.Node][]SlotID
+ varParts map[ir.Node][]SlotID
blockDebug []BlockDebug
pendingSlotLocs []VarLoc
liveSlots []liveSlot
}
if state.varParts == nil {
- state.varParts = make(map[*ir.Node][]SlotID)
+ state.varParts = make(map[ir.Node][]SlotID)
} else {
for n := range state.varParts {
delete(state.varParts, n)
for _, b := range f.Blocks {
for _, v := range b.Values {
if v.Op == OpVarDef || v.Op == OpVarKill {
- n := v.Aux.(*ir.Node)
+ n := v.Aux.(ir.Node)
if ir.IsSynthetic(n) {
continue
}
switch {
case v.Op == OpVarDef, v.Op == OpVarKill:
- n := v.Aux.(*ir.Node)
+ n := v.Aux.(ir.Node)
if ir.IsSynthetic(n) {
break
}
func (TestFrontend) StringData(s string) *obj.LSym {
return nil
}
-func (TestFrontend) Auto(pos src.XPos, t *types.Type) *ir.Node {
+func (TestFrontend) Auto(pos src.XPos, t *types.Type) ir.Node {
n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"})
n.SetClass(ir.PAUTO)
return n
// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
// parent = &{N: s, Type: string}
type LocalSlot struct {
- N *ir.Node // an ONAME *gc.Node representing a stack location.
+ N ir.Node // an ONAME ir.Node representing a stack location.
Type *types.Type // type of slot
Off int64 // offset of slot in N
continue
}
if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
- if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Node).Type().HasPointers()) {
+ if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(ir.Node).Type().HasPointers()) {
// These ops don't really change memory.
continue
// Note: OpVarDef requires that the defined variable not have pointers.
// This forces later liveness analysis to make the
// value live at this point.
v.SetArg(0, s.makeSpill(a, b))
- } else if _, ok := a.Aux.(*ir.Node); ok && vi.rematerializeable {
+ } else if _, ok := a.Aux.(ir.Node); ok && vi.rematerializeable {
// Rematerializeable value with an ir.Node. This is the address of
// a stack object (e.g. an LEAQ). Keep the object live.
// Change it to VarLive, which is what plive expects for locals.
}{
{Value{}, 72, 112},
{Block{}, 164, 304},
- {LocalSlot{}, 28, 40},
+ {LocalSlot{}, 32, 48},
{valState{}, 28, 40},
}
if v.Aux == nil {
f.Fatalf("%s has nil Aux\n", v.LongString())
}
- loc := LocalSlot{N: v.Aux.(*ir.Node), Type: v.Type, Off: v.AuxInt}
+ loc := LocalSlot{N: v.Aux.(ir.Node), Type: v.Type, Off: v.AuxInt}
if f.pass.debug > stackDebug {
fmt.Printf("stackalloc %s to %s\n", v, loc)
}
switch v.Aux.(type) {
case *obj.LSym:
gc.AddAux(&p.From, v)
- case *ir.Node:
+ case ir.Node:
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
default: