func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
// Run through the function in program text order, building up location
// lists as we go. The heavy lifting has mostly already been done.
+
for _, b := range state.f.Blocks {
if !blockLocs[b.ID].relevant {
continue
state.mergePredecessors(b, blockLocs)
zeroWidthPending := false
+ apcChangedSize := 0 // size of changedVars for leading Args, Phi, ClosurePtr
+ // expect to see values in pattern (apc)* (zerowidth|real)*
for _, v := range b.Values {
slots := state.valueNames[v.ID]
reg, _ := state.f.getHome(v.ID).(*Register)
- changed := state.processValue(v, slots, reg)
+ changed := state.processValue(v, slots, reg) // changed == added to state.changedVars
if opcodeTable[v.Op].zeroWidth {
if changed {
+ if v.Op == OpArg || v.Op == OpPhi || v.Op.isLoweredGetClosurePtr() {
+ // These ranges begin at true beginning of block, not after first instruction
+ if zeroWidthPending {
+ b.Func.Fatalf("Unexpected op mixed with OpArg/OpPhi/OpLoweredGetClosurePtr at beginning of block %s in %s\n%s", b, b.Func.Name, b.Func)
+ }
+ apcChangedSize = len(state.changedVars.contents())
+ continue
+ }
+ // Other zero-width ops must wait on a "real" op.
zeroWidthPending = true
}
continue
if !changed && !zeroWidthPending {
continue
}
+ // Not zero-width; i.e., a "real" instruction.
zeroWidthPending = false
- for _, varID := range state.changedVars.contents() {
- state.updateVar(VarID(varID), v, state.currentState.slots)
+ for i, varID := range state.changedVars.contents() {
+ if i < apcChangedSize { // buffered true start-of-block changes
+ state.updateVar(VarID(varID), v.Block, BlockStart)
+ } else {
+ state.updateVar(VarID(varID), v.Block, v)
+ }
}
state.changedVars.clear()
+ apcChangedSize = 0
+ }
+ for i, varID := range state.changedVars.contents() {
+ if i < apcChangedSize { // buffered true start-of-block changes
+ state.updateVar(VarID(varID), b, BlockStart)
+ } else {
+ state.updateVar(VarID(varID), b, BlockEnd)
+ }
}
}
}
// updateVar updates the pending location list entry for varID to
-// reflect the new locations in curLoc, caused by v.
-func (state *debugState) updateVar(varID VarID, v *Value, curLoc []VarLoc) {
+// reflect the new locations in curLoc, beginning at v in block b.
+// v may be one of the special values indicating block start or end.
+func (state *debugState) updateVar(varID VarID, b *Block, v *Value) {
+ curLoc := state.currentState.slots
// Assemble the location list entry with whatever's live.
empty := true
for _, slotID := range state.varSlots[varID] {
}
pending := &state.pendingEntries[varID]
if empty {
- state.writePendingEntry(varID, v.Block.ID, v.ID)
+ state.writePendingEntry(varID, b.ID, v.ID)
pending.clear()
return
}
}
}
- state.writePendingEntry(varID, v.Block.ID, v.ID)
+ state.writePendingEntry(varID, b.ID, v.ID)
pending.present = true
- pending.startBlock = v.Block.ID
+ pending.startBlock = b.ID
pending.startValue = v.ID
for i, slot := range state.varSlots[varID] {
pending.pieces[i] = curLoc[slot]
return x.ID > y.ID
}
+func (op Op) isLoweredGetClosurePtr() bool {
+ switch op {
+ case OpAMD64LoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr, OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr,
+ Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr,
+ OpWasmLoweredGetClosurePtr:
+ return true
+ }
+ return false
+}
+
// Schedule the Values in each Block. After this phase returns, the
// order of b.Values matters and is the order in which those values
// will appear in the assembly output. For now it generates a
// Compute score. Larger numbers are scheduled closer to the end of the block.
for _, v := range b.Values {
switch {
- case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr ||
- v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr ||
- v.Op == Op386LoweredGetClosurePtr || v.Op == OpMIPS64LoweredGetClosurePtr ||
- v.Op == OpS390XLoweredGetClosurePtr || v.Op == OpMIPSLoweredGetClosurePtr ||
- v.Op == OpWasmLoweredGetClosurePtr:
+ case v.Op.isLoweredGetClosurePtr():
// We also score GetLoweredClosurePtr as early as possible to ensure that the
// context register is not stomped. GetLoweredClosurePtr should only appear
// in the entry block where there are no phi functions, so there is no
}
}
- if b.Control != nil && b.Control.Op != OpPhi {
+ if b.Control != nil && b.Control.Op != OpPhi && b.Control.Op != OpArg {
// Force the control value to be scheduled at the end,
// unless it is a phi value (which must be first).
+ // OpArg also goes first -- if it is stack it register allocates
+ // to a LoadReg, if it is register it is from the beginning anyway.
score[b.Control.ID] = ScoreControl
// Schedule values dependent on the control value at the end.
canMove := make([]bool, f.NumValues())
for _, b := range f.Blocks {
for _, v := range b.Values {
+ if v.Op.isLoweredGetClosurePtr() {
+ // Must stay in the entry block.
+ continue
+ }
switch v.Op {
- case OpPhi, OpArg, OpSelect0, OpSelect1,
- OpAMD64LoweredGetClosurePtr, Op386LoweredGetClosurePtr,
- OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr,
- OpMIPSLoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr,
- OpS390XLoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr,
- OpWasmLoweredGetClosurePtr:
+ case OpPhi, OpArg, OpSelect0, OpSelect1:
// Phis need to stay in their block.
- // GetClosurePtr & Arg must stay in the entry block.
+ // Arg must stay in the entry block.
// Tuple selectors must stay with the tuple generator.
continue
}