// After this phase returns, the order of f.Blocks matters and is the order
// in which those blocks will appear in the assembly output.
func layout(f *Func) {
+ // Delegate to layoutOrder so the same ordering logic can be reused
+ // by the register allocator (see layoutRegallocOrder, test mode 0).
+ f.Blocks = layoutOrder(f)
+}
+
+// Register allocation may use a different order which has constraints
+// imposed by the linear-scan algorithm. Note that f.pass here is
+// regalloc, so the switch is conditional on -d=ssa/regalloc/test=N.
+func layoutRegallocOrder(f *Func) []*Block {
+
+ switch f.pass.test {
+ case 0: // layout order
+ return layoutOrder(f)
+ case 1: // existing block order
+ return f.Blocks
+ case 2: // reverse of postorder; legal, but usually not good.
+ po := f.postorder()
+ visitOrder := make([]*Block, len(po))
+ for i, b := range po {
+ // Reverse: the first block of the postorder goes last.
+ j := len(po) - i - 1
+ visitOrder[j] = b
+ }
+ return visitOrder
+ }
+
+ // Unrecognized test mode; caller gets no order.
+ return nil
+}
+
+func layoutOrder(f *Func) []*Block {
order := make([]*Block, 0, f.NumBlocks())
scheduled := make([]bool, f.NumBlocks())
idToBlock := make([]*Block, f.NumBlocks())
}
}
}
- f.Blocks = order
+ return order
}
copies map[*Value]bool
loopnest *loopnest
+
+ // choose a good order in which to visit blocks for allocation purposes.
+ visitOrder []*Block
}
type endReg struct {
s.allocatable &^= 1 << 15 // X7 disallowed (one 387 register is used as scratch space during SSE->387 generation in ../x86/387.go)
}
+ // Linear scan register allocation can be influenced by the order in which blocks appear.
+ // Decouple the register allocation order from the generated block order.
+ // This also creates an opportunity for experiments to find a better order.
+ s.visitOrder = layoutRegallocOrder(f)
+
+ // Compute block order. This array allows us to distinguish forward edges
+ // from backward edges and compute how far they go.
+ blockOrder := make([]int32, f.NumBlocks())
+ for i, b := range s.visitOrder {
+ blockOrder[b.ID] = int32(i)
+ }
+
s.regs = make([]regState, s.numRegs)
s.values = make([]valState, f.NumValues())
s.orig = make([]*Value, f.NumValues())
s.copies = make(map[*Value]bool)
- for _, b := range f.Blocks {
+ for _, b := range s.visitOrder {
for _, v := range b.Values {
if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple() {
s.values[v.ID].needReg = true
}
s.computeLive()
- // Compute block order. This array allows us to distinguish forward edges
- // from backward edges and compute how far they go.
- blockOrder := make([]int32, f.NumBlocks())
- for i, b := range f.Blocks {
- blockOrder[b.ID] = int32(i)
- }
-
// Compute primary predecessors.
s.primary = make([]int32, f.NumBlocks())
- for _, b := range f.Blocks {
+ for _, b := range s.visitOrder {
best := -1
for i, e := range b.Preds {
p := e.b
f.Fatalf("entry block must be first")
}
- for _, b := range f.Blocks {
+ for _, b := range s.visitOrder {
if s.f.pass.debug > regDebug {
fmt.Printf("Begin processing block %v\n", b)
}
}
}
- for _, b := range f.Blocks {
+ for _, b := range s.visitOrder {
i := 0
for _, v := range b.Values {
if v.Op == OpInvalid {
// Precompute some useful info.
phiRegs := make([]regMask, f.NumBlocks())
- for _, b := range f.Blocks {
+ for _, b := range s.visitOrder {
var m regMask
for _, v := range b.Values {
if v.Op != OpPhi {
// Insert spill instructions into the block schedules.
var oldSched []*Value
- for _, b := range f.Blocks {
+ for _, b := range s.visitOrder {
nphi := 0
for _, v := range b.Values {
if v.Op != OpPhi {
fmt.Println(s.f.String())
}
- for _, b := range s.f.Blocks {
+ for _, b := range s.visitOrder {
if len(b.Preds) <= 1 {
continue
}