package ssa
+import "container/heap"
+
// Scheduling score classes for values within a block.
// Lower scores are scheduled earlier (towards the top of the block),
// higher scores later (towards the bottom).
// ScoreMemory is required: it is assigned to memory-typed values in
// schedule (it was missing here while still being referenced below).
const (
	ScorePhi = iota // towards top of block
	ScoreVarDef
	ScoreMemory
	ScoreDefault
	ScoreFlags
	ScoreControl // towards bottom of block
)
+type ValHeap struct {
+ a []*Value
+ less func(a, b *Value) bool
+}
+
+func (h ValHeap) Len() int { return len(h.a) }
+func (h ValHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
+
+func (h *ValHeap) Push(x interface{}) {
+ // Push and Pop use pointer receivers because they modify the slice's length,
+ // not just its contents.
+ v := x.(*Value)
+ h.a = append(h.a, v)
+}
+func (h *ValHeap) Pop() interface{} {
+ old := h.a
+ n := len(old)
+ x := old[n-1]
+ h.a = old[0 : n-1]
+ return x
+}
+func (h ValHeap) Less(i, j int) bool { return h.less(h.a[i], h.a[j]) }
+
// Schedule the Values in each Block. After this phase returns, the
// order of b.Values matters and is the order in which those values
// will appear in the assembly output. For now it generates a
// naive score-driven ordering via a priority queue of schedulable
// values (see ValHeap).
//
// NOTE(review): this chunk still contains "+"/"-" unified-diff markers
// and several omitted context runs from a partially applied patch; it
// will not compile until the merge is completed. The gaps are marked
// with NOTE(review) comments below.
func schedule(f *Func) {
// For each value, the number of times it is used in the block
// by values that have not been scheduled yet.
- uses := make([]int, f.NumValues())
+ uses := make([]int32, f.NumValues())
// "priority" for a value
- score := make([]uint8, f.NumValues())
+ score := make([]int8, f.NumValues())
// scheduling order. We queue values in this list in reverse order.
var order []*Value
- // priority queue of legally schedulable (0 unscheduled uses) values
- var priq [ScoreCount][]*Value
-
// maps mem values to the next live memory value
nextMem := make([]*Value, f.NumValues())
// additional pretend arguments for each Value. Used to enforce load/store ordering.
additionalArgs := make([][]*Value, f.NumValues())
+ for _, b := range f.Blocks {
+ // Compute score. Larger numbers are scheduled closer to the end of the block.
+ for _, v := range b.Values {
+ switch {
+ case v.Op == OpAMD64LoweredGetClosurePtr:
+ // We also score GetLoweredClosurePtr as early as possible to ensure that the
+ // context register is not stomped. GetLoweredClosurePtr should only appear
+ // in the entry block where there are no phi functions, so there is no
+ // conflict or ambiguity here.
+ if b != f.Entry {
+ f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String())
+ }
+ score[v.ID] = ScorePhi
+ case v.Op == OpPhi:
+ // We want all the phis first.
+ score[v.ID] = ScorePhi
+ case v.Op == OpVarDef:
+ // We want all the vardefs next.
+ score[v.ID] = ScoreVarDef
+ case v.Type.IsMemory():
+ // Schedule stores as early as possible. This tends to
+ // reduce register pressure. It also helps make sure
+ // VARDEF ops are scheduled before the corresponding LEA.
+ score[v.ID] = ScoreMemory
+ case v.Type.IsFlags():
+ // Schedule flag register generation as late as possible.
+ // This makes sure that we only have one live flags
+ // value at a time.
+ score[v.ID] = ScoreFlags
+ default:
+ score[v.ID] = ScoreDefault
+ }
+ }
+ }
+
for _, b := range f.Blocks {
// Find store chain for block.
// Store chains for different blocks overwrite each other, so
// NOTE(review): diff gap — the store-chain computation (nextMem) and the
// loop header whose body increments uses are omitted from this chunk.
uses[v.ID]++
}
}
- // Compute score. Larger numbers are scheduled closer to the end of the block.
- for _, v := range b.Values {
- switch {
- case v.Op == OpAMD64LoweredGetClosurePtr:
- // We also score GetLoweredClosurePtr as early as possible to ensure that the
- // context register is not stomped. GetLoweredClosurePtr should only appear
- // in the entry block where there are no phi functions, so there is no
- // conflict or ambiguity here.
- if b != f.Entry {
- f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String())
- }
- score[v.ID] = ScorePhi
- case v.Op == OpPhi:
- // We want all the phis first.
- score[v.ID] = ScorePhi
- case v.Op == OpVarDef:
- // We want all the vardefs next.
- score[v.ID] = ScoreVarDef
- case v.Type.IsMemory():
- // Schedule stores as early as possible. This tends to
- // reduce register pressure. It also helps make sure
- // VARDEF ops are scheduled before the corresponding LEA.
- score[v.ID] = ScoreMemory
- case v.Type.IsFlags():
- // Schedule flag register generation as late as possible.
- // This makes sure that we only have one live flags
- // value at a time.
- score[v.ID] = ScoreFlags
- default:
- score[v.ID] = ScoreDefault
- }
- }
+
if b.Control != nil && b.Control.Op != OpPhi {
// Force the control value to be scheduled at the end,
// unless it is a phi value (which must be first).
// NOTE(review): diff gap — the statement assigning the control value's
// score (presumably ScoreControl) is omitted from this chunk.
}
}
- // Initialize priority queue with schedulable values.
- for i := range priq {
- priq[i] = priq[i][:0]
+ // To put things into a priority queue
+ // The values that should come last are least.
+ priq := &ValHeap{
+ a: make([]*Value, 0, 8), // TODO allocate once and reuse.
+ less: func(x, y *Value) bool {
+ sx := score[x.ID]
+ sy := score[y.ID]
+ if c := sx - sy; c != 0 {
+ return c > 0 // higher score comes later.
+ }
+ if x.Line != y.Line { // Favor in-order line stepping
+ return x.Line > y.Line
+ }
+ if x.Op != OpPhi {
+ if c := len(x.Args) - len(y.Args); c != 0 {
+ return c < 0 // smaller args comes later
+ }
+ }
+ return x.ID > y.ID
+ },
}
+
+ // Initialize priority queue with schedulable values.
for _, v := range b.Values {
if uses[v.ID] == 0 {
- s := score[v.ID]
- priq[s] = append(priq[s], v)
+ heap.Push(priq, v)
}
}
order = order[:0]
for {
// Find highest priority schedulable value.
- var v *Value
- for i := len(priq) - 1; i >= 0; i-- {
- n := len(priq[i])
- if n == 0 {
- continue
- }
- v = priq[i][n-1]
- priq[i] = priq[i][:n-1]
- break
- }
- if v == nil {
+ // Note that schedule is assembled backwards.
+
+ if priq.Len() == 0 {
break
}
+ v := heap.Pop(priq).(*Value)
+
// Add it to the schedule.
order = append(order, v)
// NOTE(review): diff gap — the "for _, w := range v.Args" loop header
// (decrementing each argument's remaining-use count) is omitted here.
uses[w.ID]--
if uses[w.ID] == 0 {
// All uses scheduled, w is now schedulable.
- s := score[w.ID]
- priq[s] = append(priq[s], w)
+ heap.Push(priq, w)
}
}
for _, w := range additionalArgs[v.ID] {
uses[w.ID]--
if uses[w.ID] == 0 {
// All uses scheduled, w is now schedulable.
- s := score[w.ID]
- priq[s] = append(priq[s], w)
+ heap.Push(priq, w)
}
}
}