From: Heschi Kreinick
Date: Thu, 19 Oct 2017 22:59:41 +0000 (-0400)
Subject: cmd/compile/internal/ssa: use reverse postorder traversal
X-Git-Tag: go1.10beta1~615
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=73f1a1a1a7f7781c095ef6aebe8b0adf0669d6b2;p=gostls13.git

cmd/compile/internal/ssa: use reverse postorder traversal

Instead of the hand-written control flow analysis in debug info
generation, use a reverse postorder traversal, which is basically the
same thing. It should be slightly faster.

More importantly, the previous version simply gave up in the case of
non-reducible functions, and produced output that caused a later stage
to crash. It turns out that there's a non-reducible function in
compress/flate, so that wasn't a theoretical issue. With this change,
all blocks will be visited, even for non-reducible functions.

Change-Id: Id47536764ee93203c6b4105a1a3013fe3265aa12
Reviewed-on: https://go-review.googlesource.com/73110
Run-TryBot: Heschi Kreinick
TryBot-Result: Gobot Gobot
Reviewed-by: David Chase
---

diff --git a/src/cmd/compile/fmt_test.go b/src/cmd/compile/fmt_test.go
index e4707fb310..cc0ff0cee7 100644
--- a/src/cmd/compile/fmt_test.go
+++ b/src/cmd/compile/fmt_test.go
@@ -606,7 +606,6 @@ var knownFormats = map[string]string{
 	"[16]byte %x": "",
 	"[]*cmd/compile/internal/gc.Node %v": "",
 	"[]*cmd/compile/internal/gc.Sig %#v": "",
-	"[]*cmd/compile/internal/ssa.Block %+v": "",
 	"[]*cmd/compile/internal/ssa.Value %v": "",
 	"[][]cmd/compile/internal/ssa.SlotID %v": "",
 	"[]byte %s": "",
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index cf59e76d76..60c914d778 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -274,30 +274,14 @@ func BuildFuncDebug(f *Func, loggingEnabled bool) *FuncDebug {
 
 	// Build up block states, starting with the first block, then
 	// processing blocks once their predecessors have been processed.
-	// TODO: use a reverse post-order traversal instead of the work queue.
-
 	// Location list entries for each block.
 	blockLocs := make([]*BlockDebug, f.NumBlocks())
 
-	// Work queue of blocks to visit. Some of them may already be processed.
-	work := []*Block{f.Entry}
-
-	for len(work) > 0 {
-		b := work[0]
-		work = work[1:]
-		if blockLocs[b.ID] != nil {
-			continue // already processed
-		}
-		if !state.predecessorsDone(b, blockLocs) {
-			continue // not ready yet
-		}
-
-		for _, edge := range b.Succs {
-			if blockLocs[edge.Block().ID] != nil {
-				continue
-			}
-			work = append(work, edge.Block())
-		}
+	// Reverse postorder: visit a block after as many as possible of its
+	// predecessors have been visited.
+	po := f.Postorder()
+	for i := len(po) - 1; i >= 0; i-- {
+		b := po[i]
 
 		// Build the starting state for the block from the final
 		// state of its predecessors.
@@ -351,7 +335,7 @@ func BuildFuncDebug(f *Func, loggingEnabled bool) *FuncDebug {
 			last.End = BlockEnd
 		}
 		if state.loggingEnabled {
-			f.Logf("Block done: locs %v, regs %v. work = %+v\n", state.BlockString(locs), state.registerContents, work)
+			f.Logf("Block done: locs %v, regs %v\n", state.BlockString(locs), state.registerContents)
 		}
 		blockLocs[b.ID] = locs
 	}
@@ -382,30 +366,6 @@ func isSynthetic(slot *LocalSlot) bool {
 	return c == '.' || c == '~'
 }
 
-// predecessorsDone reports whether block is ready to be processed.
-func (state *debugState) predecessorsDone(b *Block, blockLocs []*BlockDebug) bool {
-	f := b.Func
-	for _, edge := range b.Preds {
-		// Ignore back branches, e.g. the continuation of a for loop.
-		// This may not work for functions with mutual gotos, which are not
-		// reducible, in which case debug information will be missing for any
-		// code after that point in the control flow.
-		if f.sdom().isAncestorEq(b, edge.b) {
-			if state.loggingEnabled {
-				f.Logf("ignoring back branch from %v to %v\n", edge.b, b)
-			}
-			continue // back branch
-		}
-		if blockLocs[edge.b.ID] == nil {
-			if state.loggingEnabled {
-				f.Logf("%v is not ready because %v isn't done\n", b, edge.b)
-			}
-			return false
-		}
-	}
-	return true
-}
-
 // mergePredecessors takes the end state of each of b's predecessors and
 // intersects them to form the starting state for b.
 // The registers slice (the second return value) will be reused for each call to mergePredecessors.
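
For readers unfamiliar with the traversal adopted above, here is a small,
self-contained sketch of a postorder computation and a reverse postorder
walk. It is illustrative only: the block type, the postorder helper, and the
example graph are invented for this sketch, and the compiler's real traversal
is f.Postorder() in the ssa package (which is implemented iteratively rather
than recursively). The point is the property the change relies on: iterating
the postorder backwards processes each block after as many of its
predecessors as possible, and it reaches every block even when the control
flow graph is not reducible.

	package main

	import "fmt"

	// block is a stand-in for the compiler's *ssa.Block; only the fields
	// this sketch needs are included.
	type block struct {
		id    int
		succs []*block
	}

	// postorder returns the blocks reachable from entry in depth-first
	// postorder: a block is appended only after the DFS below it is done.
	func postorder(entry *block, nblocks int) []*block {
		seen := make([]bool, nblocks)
		var order []*block
		var visit func(b *block)
		visit = func(b *block) {
			seen[b.id] = true
			for _, s := range b.succs {
				if !seen[s.id] {
					visit(s)
				}
			}
			order = append(order, b)
		}
		visit(entry)
		return order
	}

	func main() {
		// An irreducible graph: the cycle {b1, b2} has two entry points,
		// b0 -> b1 and b0 -> b2, so it is not a natural loop.
		b3 := &block{id: 3}
		b2 := &block{id: 2}
		b1 := &block{id: 1, succs: []*block{b2, b3}}
		b2.succs = []*block{b1, b3}
		b0 := &block{id: 0, succs: []*block{b1, b2}}

		// Reverse postorder: walk the postorder from the back. Each block
		// comes after as many of its predecessors as possible; only a
		// predecessor reached through a back edge (b2 -> b1 in this walk)
		// can still be unprocessed. Every block is visited exactly once,
		// irreducible control flow or not.
		po := postorder(b0, 4)
		for i := len(po) - 1; i >= 0; i-- {
			fmt.Printf("processing b%d\n", po[i].id)
		}
		// Prints b0, b1, b2, b3 in that order.
	}

Unlike the old work queue, which refused to process a block until
predecessorsDone reported true and therefore stalled on irreducible control
flow, the reverse postorder walk takes blocks in a fixed order, so no block
is ever skipped; predecessors that are still unprocessed (reachable only
through back edges) are simply not part of the merged starting state.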