*(*uint16)(f) &^= mask
}
}
+
+type bitset32 uint32
+
+func (f *bitset32) set(mask uint32, b bool) {
+ if b {
+ *(*uint32)(f) |= mask
+ } else {
+ *(*uint32)(f) &^= mask
+ }
+}
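// A standalone, runnable sketch (not part of the patch) of how the new
// bitset32 type is driven. The mask constants here are hypothetical
// stand-ins; the real node* masks are declared elsewhere in this package
// with the same 1 << iota pattern.
package main

import "fmt"

type bitset32 uint32

func (f *bitset32) set(mask uint32, b bool) {
	if b {
		*(*uint32)(f) |= mask
	} else {
		*(*uint32)(f) &^= mask
	}
}

const (
	maskUsed    = 1 << iota // hypothetical flag, bit 0
	maskHasCall             // hypothetical flag, bit 1
)

func main() {
	var fl bitset32
	fl.set(maskHasCall, true) // OR the bit in
	fl.set(maskUsed, true)
	fl.set(maskUsed, false)          // AND-NOT the bit back out
	fmt.Println(fl&maskHasCall != 0) // true
	fmt.Println(fl&maskUsed != 0)    // false
}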
if v.Name.Byval() {
// If v is captured by value, we merely downgrade it to PPARAM.
v.Class = PPARAM
-
- v.Ullman = 1
fld.Nname = v
} else {
// If v of type T is captured by reference,
if v.Name.Byval() && v.Type.Width <= int64(2*Widthptr) {
// If it is a small variable captured by value, downgrade it to PAUTO.
v.Class = PAUTO
- v.Ullman = 1
xfunc.Func.Dcl = append(xfunc.Func.Dcl, v)
body = append(body, nod(OAS, v, cv))
} else {
ptr.Sym = lookup("rcvr")
ptr.Class = PAUTO
ptr.SetAddable(true)
- ptr.Ullman = 1
ptr.SetUsed(true)
ptr.Name.Curfn = xfunc
ptr.Xoffset = 0
n := nod(ONAME, nil, nil)
n.Sym = s
n.SetAddable(true)
- n.Ullman = 1
n.Xoffset = 0
return n
}
n := nod(ONONAME, nil, nil)
n.Sym = s
n.SetAddable(true)
- n.Ullman = 1
n.Xoffset = 0
return n
}
c.SetIsddd(n.Isddd())
c.Name.Defn = n
c.SetAddable(false)
- c.Ullman = 2
c.Name.Funcdepth = funcdepth
// Link into list of active closure variables.
func (n *Node) jconv(s fmt.State, flag FmtFlag) {
c := flag & FmtShort
- if c == 0 && n.Ullman != 0 {
- fmt.Fprintf(s, " u(%d)", n.Ullman)
- }
-
if c == 0 && n.Addable() {
fmt.Fprintf(s, " a(%v)", n.Addable())
}
fmt.Fprint(s, " nonnil")
}
+ if c == 0 && n.HasCall() {
+ fmt.Fprintf(s, " hascall")
+ }
+
if c == 0 && n.Used() {
fmt.Fprintf(s, " used(%v)", n.Used())
}
// Modify n in place so that uses of n now mean indirection of the heapaddr.
n.Class = PAUTOHEAP
- n.Ullman = 2
n.Xoffset = 0
n.Name.Param.Heapaddr = heapaddr
n.Esc = EscHeap
n.Type = t
n.Class = PAUTO
n.SetAddable(true)
- n.Ullman = 1
n.Esc = EscNever
n.Name.Curfn = Curfn
n.Name.SetAutoTemp(true)
}
n.Ninit.AppendNodes(&init)
- n.Ullman = UINF
+ n.SetHasCall(true)
}
n := nod(OADDR, s.Def, nil)
n.Type = ptrto(s.Def.Type)
n.SetAddable(true)
- n.Ullman = 2
n.Typecheck = 1
return n
}
n := nod(OADDR, s.Def, nil)
n.Type = ptrto(s.Def.Type)
n.SetAddable(true)
- n.Ullman = 2
n.Typecheck = 1
return n
}
n.Type = t
n.Class = PAUTO
n.SetAddable(true)
- n.Ullman = 1
n.Esc = EscNever
n.Xoffset = 0
n.Name.Curfn = Curfn
c.SetVal(Val{new(Mpint)})
c.Val().U.(*Mpint).SetInt64(v)
c.Type = Types[TIDEAL]
- ullmancalc(c)
return c
}
c.SetVal(Val{newMpflt()})
c.Val().U.(*Mpflt).Set(v)
c.Type = Types[TIDEAL]
- ullmancalc(c)
return c
}
*n = Node{}
n.Op = OLITERAL
n.SetAddable(true)
- ullmancalc(n)
n.SetVal(Val{new(Mpint)})
n.Val().U.(*Mpint).SetInt64(v)
n.Type = t
}
}
-// calculate sethi/ullman number
-// roughly how many registers needed to
-// compile a node. used to compile the
-// hardest side first to minimize registers.
-func ullmancalc(n *Node) {
+// updateHasCall checks whether expression n contains any function
+// calls and sets the n.HasCall flag accordingly.
+func updateHasCall(n *Node) {
if n == nil {
return
}
- var ul int
- var ur int
+ b := false
if n.Ninit.Len() != 0 {
- ul = UINF
+ // TODO(mdempsky): This seems overly conservative.
+ b = true
goto out
}
switch n.Op {
case OLITERAL, ONAME:
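+ // Leaf nodes contain no calls; leave b false.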
- ul = 1
- if n.Class == PAUTOHEAP {
- ul++
- }
- goto out
-
case OAS:
- if !needwritebarrier(n.Left) {
- break
+ if needwritebarrier(n.Left) {
+ b = true
+ goto out
}
- fallthrough
case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER:
- ul = UINF
+ b = true
goto out
-
- // hard with instrumented code
case OANDAND, OOROR:
+ // hard with instrumented code
if instrumenting {
- ul = UINF
+ b = true
goto out
}
case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR,
OIND, ODOTPTR, ODOTTYPE, ODIV, OMOD:
// These ops might panic; make sure they are done
// before we start marshaling args for a call. See issue 16760.
- ul = UINF
+ b = true
goto out
}
- ul = 1
- if n.Left != nil {
- ul = int(n.Left.Ullman)
- }
- ur = 1
- if n.Right != nil {
- ur = int(n.Right.Ullman)
- }
- if ul == ur {
- ul += 1
+ if n.Left != nil && n.Left.HasCall() {
+ b = true
+ goto out
}
- if ur > ul {
- ul = ur
+ if n.Right != nil && n.Right.HasCall() {
+ b = true
+ goto out
}
out:
- if ul > 200 {
- ul = 200 // clamp to uchar with room to grow
- }
- n.Ullman = uint8(ul)
+ n.SetHasCall(b)
}
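// A standalone sketch (toy types, not compiler code): HasCall is a cached,
// bottom-up bit rather than a property recomputed on read, so a parent must
// be refreshed with updateHasCall after its children change. That is why
// the caller hunks below re-run it after rewriting subtrees.
package main

import "fmt"

type toyExpr struct {
	isCall      bool
	left, right *toyExpr
	hasCall     bool // cached, maintained bottom-up
}

func toyUpdateHasCall(e *toyExpr) {
	if e == nil {
		return
	}
	e.hasCall = e.isCall ||
		(e.left != nil && e.left.hasCall) ||
		(e.right != nil && e.right.hasCall)
}

func main() {
	// x + f(y): the call on the right marks the whole sum.
	call := &toyExpr{isCall: true}
	toyUpdateHasCall(call)
	sum := &toyExpr{left: &toyExpr{}, right: call}
	toyUpdateHasCall(sum.left)
	toyUpdateHasCall(sum)
	fmt.Println(sum.hasCall) // true
}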
func badtype(op Op, tl *Type, tr *Type) {
}
n.Ninit.Prepend(init...)
- n.Ullman = UINF
+ n.SetHasCall(true)
return n
}
Pos src.XPos
- flags bitset16
+ flags bitset32 // widened: with nodeHasCall the flags no longer fit in 16 bits
Esc uint16 // EscXXX
Op Op
- Ullman uint8 // sethi/ullman number
Etype EType // op for OASOP, etype for OTYPE, exclam for export, 6g saved reg, ChanDir for OTCHAN, for OINDEXMAP 1=LHS,0=RHS
Class Class // PPARAM, PAUTO, PEXTERN, etc
Embedded uint8 // ODCLFIELD embedded type
nodeBounded // bounds check unnecessary
nodeAddable // addressable
nodeUsed // for variable/label declared and not used error
+ nodeHasCall // expression contains a function call
)
func (n *Node) HasBreak() bool { return n.flags&nodeHasBreak != 0 }
func (n *Node) Bounded() bool { return n.flags&nodeBounded != 0 }
func (n *Node) Addable() bool { return n.flags&nodeAddable != 0 }
func (n *Node) Used() bool { return n.flags&nodeUsed != 0 }
+func (n *Node) HasCall() bool { return n.flags&nodeHasCall != 0 }
func (n *Node) SetHasBreak(b bool) { n.flags.set(nodeHasBreak, b) }
func (n *Node) SetIsClosureVar(b bool) { n.flags.set(nodeIsClosureVar, b) }
func (n *Node) SetBounded(b bool) { n.flags.set(nodeBounded, b) }
func (n *Node) SetAddable(b bool) { n.flags.set(nodeAddable, b) }
func (n *Node) SetUsed(b bool) { n.flags.set(nodeUsed, b) }
+func (n *Node) SetHasCall(b bool) { n.flags.set(nodeHasCall, b) }
// Val returns the Val for the node.
func (n *Node) Val() Val {
lr := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init)
ll = append(ll, lr...)
n.Left.Left = nil
- ullmancalc(n.Left)
+ updateHasCall(n.Left)
n.List.Set(reorder1(ll))
case OAS:
n = typecheck(n, Erv)
}
- ullmancalc(n)
+ updateHasCall(n)
if Debug['w'] != 0 && n != nil {
Dump("walk", n)
// evaluating the lv or a function call
// in the conversion of the types
func fncall(l *Node, rt *Type) bool {
- if l.Ullman >= UINF || l.Op == OINDEXMAP {
+ if l.HasCall() || l.Op == OINDEXMAP {
return true
}
if needwritebarrier(l) {
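// A standalone sketch (hypothetical example, not compiler code) of two
// assignment shapes fncall must report: a store through a map index
// compiles to a runtime call, and a right-hand side with HasCall set
// obviously calls, so both are routed through a temporary.
func sketchMapStore(m map[string]int, f func() int) {
	m["k"] = f() // lvalue is an OINDEXMAP; rvalue contains a call
}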
a := nod(OAS, l, nodarg(r, 0))
a = convas(a, &nn)
- ullmancalc(a)
- if a.Ullman >= UINF {
+ updateHasCall(a)
+ if a.HasCall() {
Dump("ascompatet ucount", a)
ullmanOverflow = true
}
}
out:
- ullmancalc(n)
+ updateHasCall(n)
return n
}
for _, n := range all {
t++
- ullmancalc(n)
- if n.Ullman >= UINF {
+ updateHasCall(n)
+ if n.HasCall() {
c++
}
}
d := 0
var a *Node
for _, n := range all {
- if n.Ullman < UINF {
+ if !n.HasCall() {
r = append(r, n)
continue
}
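// A standalone sketch (hypothetical names) of the rewrite reorder1
// performs: when more than one expression in the list contains a call,
// all but one are spilled to temporaries so that no call runs while
// another call's arguments are being marshaled.
func sketchReorder(f, g func() int, h func(a, b int) int) int {
	// h(f(), g()) becomes, in effect:
	tmp := f()
	return h(tmp, g())
}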
case PPARAM, PAUTO:
break
- // assignment to non-stack variable
- // must be delayed if right has function calls.
default:
- if r.Ullman >= UINF {
+ // assignment to non-stack variable must be
+ // delayed if right has function calls.
+ if r.HasCall() {
return true
}
}
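// A standalone sketch of why the store must be delayed: in a simultaneous
// assignment both operands are evaluated before either store, so g must
// observe the old x. Issuing the store of f()'s result into the global x
// before calling g would change g's result.
var x, y int

func f() int { return 1 }
func g() int { return x } // reads the global written by the same statement

func sketchDelayedStore() {
	x, y = f(), g()
}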